/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "kgsl.h"
#include "kgsl_mmu.h"
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"

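/*
 * GPU virtual address allocations are aligned to 1 << 13 = 8K (two 4K
 * pages) by passing KGSL_MMU_ALIGN_SHIFT to gen_pool_alloc_aligned()
 * in kgsl_mmu_map(); KGSL_MMU_ALIGN_MASK clears the low 13 bits to
 * recover the aligned base address from an offset gpuaddr.
 */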
#define KGSL_MMU_ALIGN_SHIFT	13
#define KGSL_MMU_ALIGN_MASK	(~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))

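/*
 * Each pagetable entry is a 32-bit word: the low three bits carry the
 * per-page attribute flags (the GSL_PT_PAGE_RV, GSL_PT_PAGE_WV and
 * GSL_PT_PAGE_DIRTY bits used below, defined elsewhere in the driver)
 * and the remaining bits hold the page-aligned physical address.
 */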
#define GSL_PT_PAGE_BITS_MASK	0x00000007
#define GSL_PT_PAGE_ADDR_MASK	PAGE_MASK

static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);

static ssize_t
sysfs_show_ptpool_entries(struct kobject *kobj,
			  struct kobj_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", kgsl_driver.ptpool.entries);
}

static ssize_t
sysfs_show_ptpool_min(struct kobject *kobj,
		      struct kobj_attribute *attr,
		      char *buf)
{
	return sprintf(buf, "%d\n", kgsl_driver.ptpool.static_entries);
}

static ssize_t
sysfs_show_ptpool_chunks(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%d\n", kgsl_driver.ptpool.chunks);
}

static ssize_t
sysfs_show_ptpool_ptsize(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%d\n", kgsl_driver.ptpool.ptsize);
}

static struct kobj_attribute attr_ptpool_entries = {
	.attr = { .name = "ptpool_entries", .mode = 0444 },
	.show = sysfs_show_ptpool_entries,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_min = {
	.attr = { .name = "ptpool_min", .mode = 0444 },
	.show = sysfs_show_ptpool_min,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_chunks = {
	.attr = { .name = "ptpool_chunks", .mode = 0444 },
	.show = sysfs_show_ptpool_chunks,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_ptsize = {
	.attr = { .name = "ptpool_ptsize", .mode = 0444 },
	.show = sysfs_show_ptpool_ptsize,
	.store = NULL,
};

static struct attribute *ptpool_attrs[] = {
	&attr_ptpool_entries.attr,
	&attr_ptpool_min.attr,
	&attr_ptpool_chunks.attr,
	&attr_ptpool_ptsize.attr,
	NULL,
};

static struct attribute_group ptpool_attr_group = {
	.attrs = ptpool_attrs,
};

static int
_kgsl_ptpool_add_entries(struct kgsl_ptpool *pool, int count, int dynamic)
{
	struct kgsl_ptpool_chunk *chunk;
	size_t size = ALIGN(count * pool->ptsize, PAGE_SIZE);

	BUG_ON(count == 0);

	if (get_order(size) >= MAX_ORDER) {
		KGSL_CORE_ERR("ptpool allocation is too big: %zu\n", size);
		return -EINVAL;
	}

	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (chunk == NULL) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n", sizeof(*chunk));
		return -ENOMEM;
	}

	chunk->size = size;
	chunk->count = count;
	chunk->dynamic = dynamic;

	chunk->data = dma_alloc_coherent(NULL, size,
					 &chunk->phys, GFP_KERNEL);

	if (chunk->data == NULL) {
		KGSL_CORE_ERR("dma_alloc_coherent(%zu) failed\n", size);
		goto err;
	}

	chunk->bitmap = kzalloc(BITS_TO_LONGS(count) * 4, GFP_KERNEL);

	if (chunk->bitmap == NULL) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
			      BITS_TO_LONGS(count) * 4);
		goto err_dma;
	}

	list_add_tail(&chunk->list, &pool->list);

	pool->chunks++;
	pool->entries += count;

	if (!dynamic)
		pool->static_entries += count;

	return 0;

err_dma:
	dma_free_coherent(NULL, chunk->size, chunk->data, chunk->phys);
err:
	kfree(chunk);
	return -ENOMEM;
}

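/*
 * Scan each chunk's allocation bitmap for a free slot (first-fit).  A
 * set bit marks a pagetable-sized slot that is in use; slot N of a
 * chunk lives at chunk->data + N * ptsize virtually and at
 * chunk->phys + N * ptsize physically.
 */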
static void *
_kgsl_ptpool_get_entry(struct kgsl_ptpool *pool, unsigned int *physaddr)
{
	struct kgsl_ptpool_chunk *chunk;

	list_for_each_entry(chunk, &pool->list, list) {
		int bit = find_first_zero_bit(chunk->bitmap, chunk->count);

		if (bit >= chunk->count)
			continue;

		set_bit(bit, chunk->bitmap);
		*physaddr = chunk->phys + (bit * pool->ptsize);

		return chunk->data + (bit * pool->ptsize);
	}

	return NULL;
}

/**
 * kgsl_ptpool_add
 * @pool: A pointer to a ptpool structure
 * @count: Number of entries to add
 *
 * Add static entries to the pagetable pool.
 */

int
kgsl_ptpool_add(struct kgsl_ptpool *pool, int count)
{
	int ret = 0;
	BUG_ON(count == 0);

	mutex_lock(&pool->lock);

	/* Only 4MB can be allocated in one chunk, so larger allocations
	   need to be split into multiple sections */

	while (count) {
		int entries = ((count * pool->ptsize) > SZ_4M) ?
			SZ_4M / pool->ptsize : count;

		/* Add the entries as static, i.e. they don't ever stand
		   a chance of being removed */

		ret = _kgsl_ptpool_add_entries(pool, entries, 0);
		if (ret)
			break;

		count -= entries;
	}

	mutex_unlock(&pool->lock);
	return ret;
}

/**
 * kgsl_ptpool_alloc
 * @pool: A pointer to a ptpool structure
 * @physaddr: A pointer to store the physical address of the pagetable
 *
 * Allocate a pagetable from the pool.  Returns the virtual address
 * of the pagetable; the physical address is returned in @physaddr.
 */

void *kgsl_ptpool_alloc(struct kgsl_ptpool *pool, unsigned int *physaddr)
{
	void *addr = NULL;
	int ret;

	mutex_lock(&pool->lock);
	addr = _kgsl_ptpool_get_entry(pool, physaddr);
	if (addr)
		goto done;

	/* Add a chunk for 1 more pagetable and mark it as dynamic */
	ret = _kgsl_ptpool_add_entries(pool, 1, 1);

	if (ret)
		goto done;

	addr = _kgsl_ptpool_get_entry(pool, physaddr);
done:
	mutex_unlock(&pool->lock);
	return addr;
}
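
/*
 * Illustrative sketch only: a caller pairs kgsl_ptpool_alloc() with
 * kgsl_ptpool_free(), assuming a pool already set up by
 * kgsl_ptpool_init():
 *
 *	unsigned int physaddr;
 *	void *pt = kgsl_ptpool_alloc(&kgsl_driver.ptpool, &physaddr);
 *
 *	if (pt != NULL) {
 *		... program physaddr into the MMU ...
 *		kgsl_ptpool_free(&kgsl_driver.ptpool, pt);
 *	}
 */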

static inline void _kgsl_ptpool_rm_chunk(struct kgsl_ptpool_chunk *chunk)
{
	list_del(&chunk->list);

	if (chunk->data)
		dma_free_coherent(NULL, chunk->size, chunk->data,
				  chunk->phys);
	kfree(chunk->bitmap);
	kfree(chunk);
}

/**
 * kgsl_ptpool_free
 * @pool: A pointer to a ptpool structure
 * @addr: The virtual address of the pagetable to free
 *
 * Free a pagetable allocated from the pool
 */

void kgsl_ptpool_free(struct kgsl_ptpool *pool, void *addr)
{
	struct kgsl_ptpool_chunk *chunk, *tmp;

	if (pool == NULL || addr == NULL)
		return;

	mutex_lock(&pool->lock);
	list_for_each_entry_safe(chunk, tmp, &pool->list, list) {
		if (addr >= chunk->data &&
		    addr < chunk->data + chunk->size) {
			int bit = ((unsigned long) (addr - chunk->data)) /
				pool->ptsize;

			clear_bit(bit, chunk->bitmap);
			memset(addr, 0, pool->ptsize);

			if (chunk->dynamic &&
			    bitmap_empty(chunk->bitmap, chunk->count))
				_kgsl_ptpool_rm_chunk(chunk);

			break;
		}
	}

	mutex_unlock(&pool->lock);
}

void kgsl_ptpool_destroy(struct kgsl_ptpool *pool)
{
	struct kgsl_ptpool_chunk *chunk, *tmp;

	if (pool == NULL)
		return;

	mutex_lock(&pool->lock);
	list_for_each_entry_safe(chunk, tmp, &pool->list, list)
		_kgsl_ptpool_rm_chunk(chunk);
	mutex_unlock(&pool->lock);

	memset(pool, 0, sizeof(*pool));
}

/**
 * kgsl_ptpool_init
 * @pool: A pointer to a ptpool structure to initialize
 * @ptsize: The size of each pagetable in the pool
 * @entries: The number of initial entries to add to the pool
 *
 * Initialize a pool and allocate an initial chunk of entries.
 */

int kgsl_ptpool_init(struct kgsl_ptpool *pool, int ptsize, int entries)
{
	int ret = 0;
	BUG_ON(ptsize == 0);

	pool->ptsize = ptsize;
	mutex_init(&pool->lock);
	INIT_LIST_HEAD(&pool->list);

	if (entries) {
		ret = kgsl_ptpool_add(pool, entries);
		if (ret)
			return ret;
	}

	return sysfs_create_group(kgsl_driver.ptkobj, &ptpool_attr_group);
}
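
/*
 * Illustrative sketch only: the driver core is expected to size the
 * pool with one KGSL_PAGETABLE_SIZE entry per anticipated pagetable,
 * e.g. something like
 *
 *	kgsl_ptpool_init(&kgsl_driver.ptpool, KGSL_PAGETABLE_SIZE,
 *			 num_pagetables);
 *
 * where num_pagetables is a hypothetical configuration value.  Demand
 * beyond the static entries is still satisfied by the dynamic chunks
 * that kgsl_ptpool_alloc() adds as needed.
 */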

static int kgsl_cleanup_pt(struct kgsl_pagetable *pt)
{
	int i;
	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device)
			device->ftbl->cleanup_pt(device, pt);
	}
	return 0;
}

static void kgsl_destroy_pagetable(struct kref *kref)
{
	struct kgsl_pagetable *pagetable = container_of(kref,
		struct kgsl_pagetable, refcount);
	unsigned long flags;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_del(&pagetable->list);
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	pagetable_remove_sysfs_objects(pagetable);

	kgsl_cleanup_pt(pagetable);

	kgsl_ptpool_free(&kgsl_driver.ptpool, pagetable->base.hostptr);

	kgsl_driver.stats.coherent -= KGSL_PAGETABLE_SIZE;

	if (pagetable->pool)
		gen_pool_destroy(pagetable->pool);

	kfree(pagetable->tlbflushfilter.base);
	kfree(pagetable);
}

static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
{
	if (pagetable)
		kref_put(&pagetable->refcount, kgsl_destroy_pagetable);
}

static struct kgsl_pagetable *
kgsl_get_pagetable(unsigned long name)
{
	struct kgsl_pagetable *pt, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (pt->name == name) {
			ret = pt;
			kref_get(&ret->refcount);
			break;
		}
	}

	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
	return ret;
}

static struct kgsl_pagetable *
_get_pt_from_kobj(struct kobject *kobj)
{
	unsigned long ptname;

	if (!kobj)
		return NULL;

	if (sscanf(kobj->name, "%lu", &ptname) != 1)
		return NULL;

	return kgsl_get_pagetable(ptname);
}

static ssize_t
sysfs_show_entries(struct kobject *kobj,
		   struct kobj_attribute *attr,
		   char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += sprintf(buf, "%d\n", pt->stats.entries);

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_mapped(struct kobject *kobj,
		  struct kobj_attribute *attr,
		  char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += sprintf(buf, "%d\n", pt->stats.mapped);

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_va_range(struct kobject *kobj,
		    struct kobj_attribute *attr,
		    char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += sprintf(buf, "0x%x\n", pt->va_range);

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_max_mapped(struct kobject *kobj,
		      struct kobj_attribute *attr,
		      char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += sprintf(buf, "%d\n", pt->stats.max_mapped);

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_max_entries(struct kobject *kobj,
		       struct kobj_attribute *attr,
		       char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += sprintf(buf, "%d\n", pt->stats.max_entries);

	kgsl_put_pagetable(pt);
	return ret;
}

static struct kobj_attribute attr_entries = {
	.attr = { .name = "entries", .mode = 0444 },
	.show = sysfs_show_entries,
	.store = NULL,
};

static struct kobj_attribute attr_mapped = {
	.attr = { .name = "mapped", .mode = 0444 },
	.show = sysfs_show_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_va_range = {
	.attr = { .name = "va_range", .mode = 0444 },
	.show = sysfs_show_va_range,
	.store = NULL,
};

static struct kobj_attribute attr_max_mapped = {
	.attr = { .name = "max_mapped", .mode = 0444 },
	.show = sysfs_show_max_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_max_entries = {
	.attr = { .name = "max_entries", .mode = 0444 },
	.show = sysfs_show_max_entries,
	.store = NULL,
};

static struct attribute *pagetable_attrs[] = {
	&attr_entries.attr,
	&attr_mapped.attr,
	&attr_va_range.attr,
	&attr_max_mapped.attr,
	&attr_max_entries.attr,
	NULL,
};

static struct attribute_group pagetable_attr_group = {
	.attrs = pagetable_attrs,
};

static void
pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable)
{
	if (pagetable->kobj)
		sysfs_remove_group(pagetable->kobj,
				   &pagetable_attr_group);

	kobject_put(pagetable->kobj);
}

static int
pagetable_add_sysfs_objects(struct kgsl_pagetable *pagetable)
{
	char ptname[16];
	int ret = -ENOMEM;

	snprintf(ptname, sizeof(ptname), "%u", pagetable->name);
	pagetable->kobj = kobject_create_and_add(ptname,
		kgsl_driver.ptkobj);
	if (pagetable->kobj == NULL)
		goto err;

	ret = sysfs_create_group(pagetable->kobj, &pagetable_attr_group);

err:
	if (ret) {
		if (pagetable->kobj)
			kobject_put(pagetable->kobj);

		pagetable->kobj = NULL;
	}

	return ret;
}

static inline uint32_t
kgsl_pt_entry_get(struct kgsl_pagetable *pt, uint32_t va)
{
	return (va - pt->va_base) >> PAGE_SHIFT;
}

static inline void
kgsl_pt_map_set(struct kgsl_pagetable *pt, uint32_t pte, uint32_t val)
{
	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;

	writel_relaxed(val, &baseptr[pte]);
}

static inline uint32_t
kgsl_pt_map_getaddr(struct kgsl_pagetable *pt, uint32_t pte)
{
	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
	uint32_t ret = readl_relaxed(&baseptr[pte]) & GSL_PT_PAGE_ADDR_MASK;
	return ret;
}
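
/*
 * Worked example: with 4K pages, a buffer mapped at va_base + 0x42000
 * lands at pte index 0x42, and kgsl_pt_map_set() writes
 * (physaddr | protflags) into the 32-bit entry at base.hostptr[0x42].
 * The _relaxed accessors keep the compiler from caching or reordering
 * the entry accesses without adding a barrier per write; the explicit
 * wmb() in the map/unmap paths below publishes the writes to the GPU.
 */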

void kgsl_mmu_pagefault(struct kgsl_device *device)
{
	unsigned int reg;
	unsigned int ptbase;
	struct kgsl_pagetable *pt;
	int ptid = -1;

	kgsl_regread(device, MH_MMU_PAGE_FAULT, &reg);
	kgsl_regread(device, MH_MMU_PT_BASE, &ptbase);

	spin_lock(&kgsl_driver.ptlock);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (ptbase == pt->base.gpuaddr) {
			ptid = (int) pt->name;
			break;
		}
	}
	spin_unlock(&kgsl_driver.ptlock);

	KGSL_MEM_CRIT(device,
		"mmu page fault: page=0x%lx pt=%d op=%s axi=%d\n",
		reg & ~(PAGE_SIZE - 1), ptid,
		reg & 0x02 ? "WRITE" : "READ", (reg >> 4) & 0xF);
}
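
/*
 * As decoded above, MH_MMU_PAGE_FAULT packs the whole fault record into
 * one register: the faulting page in the page-aligned upper bits, the
 * access type in bit 1 (set for a write) and the offending AXI master
 * id in bits 4..7.  The faulting pagetable is identified indirectly by
 * matching the live MH_MMU_PT_BASE value against the GPU address of
 * each pagetable on the global list.
 */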

void kgsl_mh_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0;
	unsigned int reg;

	kgsl_regread(device, MH_INTERRUPT_STATUS, &status);
	kgsl_regread(device, MH_AXI_ERROR, &reg);

	if (status & MH_INTERRUPT_MASK__AXI_READ_ERROR)
		KGSL_MEM_CRIT(device, "axi read error interrupt: %08x\n", reg);
	if (status & MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
		KGSL_MEM_CRIT(device, "axi write error interrupt: %08x\n", reg);
	if (status & MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
		kgsl_mmu_pagefault(device);

	kgsl_regwrite(device, MH_INTERRUPT_CLEAR, status);
}
EXPORT_SYMBOL(kgsl_mh_intrcallback);

static int kgsl_setup_pt(struct kgsl_pagetable *pt)
{
	int i = 0;
	int status = 0;

	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device) {
			status = device->ftbl->setup_pt(device, pt);
			if (status)
				goto error_pt;
		}
	}
	return status;
error_pt:
	while (i >= 0) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device)
			device->ftbl->cleanup_pt(device, pt);
		i--;
	}
	return status;
}

static struct kgsl_pagetable *kgsl_mmu_createpagetableobject(
	unsigned int name)
{
	int status = 0;
	struct kgsl_pagetable *pagetable = NULL;
	unsigned long flags;

	pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
	if (pagetable == NULL) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n",
			      sizeof(struct kgsl_pagetable));
		return NULL;
	}

	kref_init(&pagetable->refcount);

	spin_lock_init(&pagetable->lock);
	pagetable->tlb_flags = 0;
	pagetable->name = name;
	pagetable->va_base = KGSL_PAGETABLE_BASE;
	pagetable->va_range = CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
	pagetable->last_superpte = 0;
	pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(pagetable->va_range);

	pagetable->tlbflushfilter.size = (pagetable->va_range /
		(PAGE_SIZE * GSL_PT_SUPER_PTE * 8)) + 1;
	pagetable->tlbflushfilter.base = (unsigned int *)
		kzalloc(pagetable->tlbflushfilter.size, GFP_KERNEL);
	if (!pagetable->tlbflushfilter.base) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
			      pagetable->tlbflushfilter.size);
		goto err_alloc;
	}
	GSL_TLBFLUSH_FILTER_RESET();

	pagetable->pool = gen_pool_create(PAGE_SHIFT, -1);
	if (pagetable->pool == NULL) {
		KGSL_CORE_ERR("gen_pool_create(%d) failed\n", PAGE_SHIFT);
		goto err_flushfilter;
	}

	if (gen_pool_add(pagetable->pool, pagetable->va_base,
			 pagetable->va_range, -1)) {
		KGSL_CORE_ERR("gen_pool_add failed\n");
		goto err_pool;
	}

	pagetable->base.hostptr = kgsl_ptpool_alloc(&kgsl_driver.ptpool,
		&pagetable->base.physaddr);

	if (pagetable->base.hostptr == NULL)
		goto err_pool;

	/* ptpool allocations are from coherent memory, so update the
	   device statistics accordingly */

	KGSL_STATS_ADD(KGSL_PAGETABLE_SIZE, kgsl_driver.stats.coherent,
		       kgsl_driver.stats.coherent_max);

	pagetable->base.gpuaddr = pagetable->base.physaddr;
	pagetable->base.size = KGSL_PAGETABLE_SIZE;

	status = kgsl_setup_pt(pagetable);
	if (status)
		goto err_free_sharedmem;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_add(&pagetable->list, &kgsl_driver.pagetable_list);
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	/* Create the sysfs entries */
	pagetable_add_sysfs_objects(pagetable);

	return pagetable;

err_free_sharedmem:
	kgsl_ptpool_free(&kgsl_driver.ptpool, pagetable->base.hostptr);
err_pool:
	gen_pool_destroy(pagetable->pool);
err_flushfilter:
	kfree(pagetable->tlbflushfilter.base);
err_alloc:
	kfree(pagetable);

	return NULL;
}

struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name)
{
	struct kgsl_pagetable *pt;

	pt = kgsl_get_pagetable(name);

	if (pt == NULL)
		pt = kgsl_mmu_createpagetableobject(name);

	return pt;
}

void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
{
	kgsl_put_pagetable(pagetable);
}

void kgsl_default_setstate(struct kgsl_device *device, uint32_t flags)
{
	if (!kgsl_mmu_enabled())
		return;

	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
		kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);
		kgsl_regwrite(device, MH_MMU_PT_BASE,
			      device->mmu.hwpagetable->base.gpuaddr);
	}

	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
		/* Invalidate all and tc */
		kgsl_regwrite(device, MH_MMU_INVALIDATE, 0x00000003);
	}
}
EXPORT_SYMBOL(kgsl_default_setstate);

void kgsl_setstate(struct kgsl_device *device, uint32_t flags)
{
	if (device->ftbl->setstate)
		device->ftbl->setstate(device, flags);
}
EXPORT_SYMBOL(kgsl_setstate);

void kgsl_mmu_setstate(struct kgsl_device *device,
		       struct kgsl_pagetable *pagetable)
{
	struct kgsl_mmu *mmu = &device->mmu;

	if (mmu->flags & KGSL_FLAGS_STARTED) {
		/* If the page table is not current, set up the mmu
		 * to use the newly specified page table
		 */
		if (mmu->hwpagetable != pagetable) {
			mmu->hwpagetable = pagetable;
			spin_lock(&mmu->hwpagetable->lock);
			mmu->hwpagetable->tlb_flags &= ~(1<<device->id);
			spin_unlock(&mmu->hwpagetable->lock);

			/* call device specific set page table */
			kgsl_setstate(mmu->device, KGSL_MMUFLAGS_TLBFLUSH |
				      KGSL_MMUFLAGS_PTUPDATE);
		}
	}
}
EXPORT_SYMBOL(kgsl_mmu_setstate);

int kgsl_mmu_init(struct kgsl_device *device)
{
	/*
	 * initialize the device mmu
	 *
	 * call this with the global lock held
	 */
	int status = 0;
	struct kgsl_mmu *mmu = &device->mmu;

	mmu->device = device;

	/* make sure the MPU region is page aligned */
	BUG_ON(device->mh.mpu_base & (PAGE_SIZE - 1));
	BUG_ON((device->mh.mpu_base + device->mh.mpu_range) & (PAGE_SIZE - 1));

	/* sub-client MMU lookups require address translation */
	if ((mmu->config & ~0x1) > 0) {
		/* make sure the virtual address range is a multiple of 64KB */
		BUG_ON(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE & ((1 << 16) - 1));

		/* allocate memory used for completing r/w operations that
		 * cannot be mapped by the MMU
		 */
		status = kgsl_allocate_contiguous(&mmu->dummyspace, 64);
		if (!status)
			kgsl_sharedmem_set(&mmu->dummyspace, 0, 0,
					   mmu->dummyspace.size);
	}

	return status;
}

void kgsl_mh_start(struct kgsl_device *device)
{
	struct kgsl_mh *mh = &device->mh;
	/* force the mmu off for now */
	kgsl_regwrite(device, MH_MMU_CONFIG, 0);
	kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);

	/* define physical memory range accessible by the core */
	kgsl_regwrite(device, MH_MMU_MPU_BASE, mh->mpu_base);
	kgsl_regwrite(device, MH_MMU_MPU_END,
		      mh->mpu_base + mh->mpu_range);
	kgsl_regwrite(device, MH_ARBITER_CONFIG, mh->mharb);

	if (mh->mh_intf_cfg1 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG1,
			      mh->mh_intf_cfg1);

	if (mh->mh_intf_cfg2 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG2,
			      mh->mh_intf_cfg2);

	/*
	 * Interrupts are enabled on a per-device level when
	 * kgsl_pwrctrl_irq() is called
	 */
}

int kgsl_mmu_start(struct kgsl_device *device)
{
	/*
	 * initialize the device mmu
	 *
	 * call this with the global lock held
	 */
	struct kgsl_mmu *mmu = &device->mmu;

	if (mmu->flags & KGSL_FLAGS_STARTED)
		return 0;

	/* setup MMU and sub-client behavior */
	kgsl_regwrite(device, MH_MMU_CONFIG, mmu->config);
	kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);

	kgsl_sharedmem_set(&mmu->dummyspace, 0, 0,
			   mmu->dummyspace.size);

	/* TRAN_ERROR needs a 32 byte (32 byte aligned) chunk of memory
	 * to complete transactions in case of an MMU fault.  Note that
	 * we'll leave the bottom 32 bytes of the dummyspace for other
	 * purposes (e.g. use it when dummy read cycles are needed
	 * for other blocks) */
	kgsl_regwrite(device, MH_MMU_TRAN_ERROR,
		      mmu->dummyspace.physaddr + 32);

	if (mmu->defaultpagetable == NULL)
		mmu->defaultpagetable =
			kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);

	/* Return error if the default pagetable doesn't exist */
	if (mmu->defaultpagetable == NULL)
		return -ENOMEM;

	mmu->hwpagetable = mmu->defaultpagetable;

	kgsl_regwrite(device, MH_MMU_PT_BASE,
		      mmu->hwpagetable->base.gpuaddr);
	kgsl_regwrite(device, MH_MMU_VA_RANGE,
		      (mmu->hwpagetable->va_base |
		      (mmu->hwpagetable->va_range >> 16)));
	kgsl_setstate(device, KGSL_MMUFLAGS_TLBFLUSH);
	mmu->flags |= KGSL_FLAGS_STARTED;

	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_start);

unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr)
{
	unsigned int physaddr = 0;
	pgd_t *pgd_ptr = NULL;
	pmd_t *pmd_ptr = NULL;
	pte_t *pte_ptr = NULL, pte;

	pgd_ptr = pgd_offset(current->mm, (unsigned long) virtaddr);
	if (pgd_none(*pgd_ptr) || pgd_bad(*pgd_ptr)) {
		KGSL_CORE_ERR("Invalid pgd entry\n");
		return 0;
	}

	pmd_ptr = pmd_offset(pgd_ptr, (unsigned long) virtaddr);
	if (pmd_none(*pmd_ptr) || pmd_bad(*pmd_ptr)) {
		KGSL_CORE_ERR("Invalid pmd entry\n");
		return 0;
	}

	pte_ptr = pte_offset_map(pmd_ptr, (unsigned long) virtaddr);
	if (!pte_ptr) {
		KGSL_CORE_ERR("pte_offset_map failed\n");
		return 0;
	}
	pte = *pte_ptr;
	physaddr = pte_pfn(pte);
	pte_unmap(pte_ptr);
	physaddr <<= PAGE_SHIFT;
	return physaddr;
}
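
/*
 * Note: this walk assumes the address is mapped in current->mm through
 * an ordinary pgd/pmd/pte hierarchy and that the caller keeps the
 * mapping stable (e.g. the pages are pinned); any lookup failure is
 * reported by returning a physical address of 0.
 */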

int
kgsl_mmu_map(struct kgsl_pagetable *pagetable,
	     struct kgsl_memdesc *memdesc,
	     unsigned int protflags)
{
	int numpages;
	unsigned int pte, ptefirst, ptelast, physaddr;
	int flushtlb;
	unsigned int offset = 0;

	BUG_ON(protflags & ~(GSL_PT_PAGE_RV | GSL_PT_PAGE_WV));
	BUG_ON(protflags == 0);

	memdesc->gpuaddr = gen_pool_alloc_aligned(pagetable->pool,
		memdesc->size, KGSL_MMU_ALIGN_SHIFT);

	if (memdesc->gpuaddr == 0) {
		KGSL_CORE_ERR("gen_pool_alloc(%d) failed\n", memdesc->size);
		KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
			      pagetable->name, pagetable->stats.mapped,
			      pagetable->stats.entries);
		return -ENOMEM;
	}

	numpages = (memdesc->size >> PAGE_SHIFT);

	ptefirst = kgsl_pt_entry_get(pagetable, memdesc->gpuaddr);
	ptelast = ptefirst + numpages;

	pte = ptefirst;
	flushtlb = 0;

	/* tlb needs to be flushed when the first and last pte are not at
	 * superpte boundaries */
	if ((ptefirst & (GSL_PT_SUPER_PTE - 1)) != 0 ||
	    ((ptelast + 1) & (GSL_PT_SUPER_PTE - 1)) != 0)
		flushtlb = 1;

	spin_lock(&pagetable->lock);
	for (pte = ptefirst; pte < ptelast; pte++, offset += PAGE_SIZE) {
#ifdef VERBOSE_DEBUG
		/* check if PTE exists */
		uint32_t val = kgsl_pt_map_getaddr(pagetable, pte);
		BUG_ON(val != 0 && val != GSL_PT_PAGE_DIRTY);
#endif
		if ((pte & (GSL_PT_SUPER_PTE - 1)) == 0)
			if (GSL_TLBFLUSH_FILTER_ISDIRTY(pte / GSL_PT_SUPER_PTE))
				flushtlb = 1;
		/* mark pte as in use */

		physaddr = memdesc->ops->physaddr(memdesc, offset);
		BUG_ON(physaddr == 0);
		kgsl_pt_map_set(pagetable, pte, physaddr | protflags);
	}

	/* Keep track of the statistics for the sysfs files */

	KGSL_STATS_ADD(1, pagetable->stats.entries,
		       pagetable->stats.max_entries);

	KGSL_STATS_ADD(memdesc->size, pagetable->stats.mapped,
		       pagetable->stats.max_mapped);

	/* Post all writes to the pagetable */
	wmb();

	/* Invalidate tlb only if current page table used by GPU is the
	 * pagetable that we used to allocate */
	if (flushtlb) {
		/* set all devices as needing flushing */
		pagetable->tlb_flags = UINT_MAX;
		GSL_TLBFLUSH_FILTER_RESET();
	}
	spin_unlock(&pagetable->lock);

	return 0;
}
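
/*
 * The tlbflushfilter consulted above is a dirty bitmap with one bit per
 * GSL_PT_SUPER_PTE-entry block ("superpte") of the pagetable.  Unmap
 * marks a superpte dirty instead of flushing immediately; a later map
 * that reuses a dirty superpte, or whose range is not superpte-aligned,
 * forces a full TLB invalidate on every device (tlb_flags = UINT_MAX).
 * Mappings that stay superpte-aligned can therefore be recycled without
 * any TLB flush at all.
 */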

int
kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
	       struct kgsl_memdesc *memdesc)
{
	unsigned int numpages;
	unsigned int pte, ptefirst, ptelast, superpte;
	unsigned int range = memdesc->size;

	/* All GPU addresses are assigned page aligned, but some
	   functions perturb the gpuaddr with an offset, so apply the
	   mask here to make sure we have the right address */

	unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;

	if (range == 0 || gpuaddr == 0)
		return 0;

	numpages = (range >> PAGE_SHIFT);
	if (range & (PAGE_SIZE - 1))
		numpages++;

	ptefirst = kgsl_pt_entry_get(pagetable, gpuaddr);
	ptelast = ptefirst + numpages;

	spin_lock(&pagetable->lock);
	superpte = ptefirst - (ptefirst & (GSL_PT_SUPER_PTE - 1));
	GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / GSL_PT_SUPER_PTE);
	for (pte = ptefirst; pte < ptelast; pte++) {
#ifdef VERBOSE_DEBUG
		/* check if PTE exists */
		BUG_ON(!kgsl_pt_map_getaddr(pagetable, pte));
#endif
		kgsl_pt_map_set(pagetable, pte, GSL_PT_PAGE_DIRTY);
		superpte = pte - (pte & (GSL_PT_SUPER_PTE - 1));
		if (pte == superpte)
			GSL_TLBFLUSH_FILTER_SETDIRTY(superpte /
				GSL_PT_SUPER_PTE);
	}

	/* Remove the statistics */
	pagetable->stats.entries--;
	pagetable->stats.mapped -= range;

	/* Post all writes to the pagetable */
	wmb();

	spin_unlock(&pagetable->lock);

	gen_pool_free(pagetable->pool, gpuaddr, range);

	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_unmap);
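
/*
 * Unmapped PTEs are written as GSL_PT_PAGE_DIRTY rather than cleared to
 * zero: the address bits are gone, but the entry is flagged as one
 * whose translation may still be cached in the TLB.  kgsl_mmu_map()
 * treats either value as a free slot (see its VERBOSE_DEBUG check) and
 * relies on the flush filter to invalidate stale translations before
 * the entry is reused.
 */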

int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
			struct kgsl_memdesc *memdesc, unsigned int protflags)
{
	int result = -EINVAL;
	unsigned int gpuaddr = 0;

	if (memdesc == NULL) {
		KGSL_CORE_ERR("invalid memdesc\n");
		goto error;
	}

	gpuaddr = memdesc->gpuaddr;

	result = kgsl_mmu_map(pagetable, memdesc, protflags);
	if (result)
		goto error;

	/* global mappings must have the same gpu address in all pagetables */
	if (gpuaddr && gpuaddr != memdesc->gpuaddr) {
		KGSL_CORE_ERR("pt %p addr mismatch phys 0x%08x "
			"gpu 0x%08x 0x%08x", pagetable, memdesc->physaddr,
			gpuaddr, memdesc->gpuaddr);
		goto error_unmap;
	}
	return result;
error_unmap:
	kgsl_mmu_unmap(pagetable, memdesc);
error:
	return result;
}
EXPORT_SYMBOL(kgsl_mmu_map_global);

int kgsl_mmu_stop(struct kgsl_device *device)
{
	struct kgsl_mmu *mmu = &device->mmu;

	kgsl_regwrite(device, MH_MMU_CONFIG, 0x00000000);
	mmu->flags &= ~KGSL_FLAGS_STARTED;

	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_stop);

int kgsl_mmu_close(struct kgsl_device *device)
{
	/*
	 * close device mmu
	 *
	 * call this with the global lock held
	 */
	struct kgsl_mmu *mmu = &device->mmu;

	if (mmu->dummyspace.gpuaddr)
		kgsl_sharedmem_free(&mmu->dummyspace);

	if (mmu->defaultpagetable)
		kgsl_mmu_putpagetable(mmu->defaultpagetable);

	return 0;
}