/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "kgsl.h"
#include "kgsl_mmu.h"
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"

#define KGSL_MMU_ALIGN_SHIFT	13
#define KGSL_MMU_ALIGN_MASK	(~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))

#define GSL_PT_PAGE_BITS_MASK	0x00000007
#define GSL_PT_PAGE_ADDR_MASK	PAGE_MASK

static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);

static ssize_t
sysfs_show_ptpool_entries(struct kobject *kobj,
			  struct kobj_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", kgsl_driver.ptpool.entries);
}

static ssize_t
sysfs_show_ptpool_min(struct kobject *kobj,
		      struct kobj_attribute *attr,
		      char *buf)
{
	return sprintf(buf, "%d\n", kgsl_driver.ptpool.static_entries);
}

static ssize_t
sysfs_show_ptpool_chunks(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%d\n", kgsl_driver.ptpool.chunks);
}

static ssize_t
sysfs_show_ptpool_ptsize(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%d\n", kgsl_driver.ptpool.ptsize);
}

static struct kobj_attribute attr_ptpool_entries = {
	.attr = { .name = "ptpool_entries", .mode = 0444 },
	.show = sysfs_show_ptpool_entries,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_min = {
	.attr = { .name = "ptpool_min", .mode = 0444 },
	.show = sysfs_show_ptpool_min,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_chunks = {
	.attr = { .name = "ptpool_chunks", .mode = 0444 },
	.show = sysfs_show_ptpool_chunks,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_ptsize = {
	.attr = { .name = "ptpool_ptsize", .mode = 0444 },
	.show = sysfs_show_ptpool_ptsize,
	.store = NULL,
};

static struct attribute *ptpool_attrs[] = {
	&attr_ptpool_entries.attr,
	&attr_ptpool_min.attr,
	&attr_ptpool_chunks.attr,
	&attr_ptpool_ptsize.attr,
	NULL,
};

static struct attribute_group ptpool_attr_group = {
	.attrs = ptpool_attrs,
};

static int
_kgsl_ptpool_add_entries(struct kgsl_ptpool *pool, int count, int dynamic)
{
	struct kgsl_ptpool_chunk *chunk;
	size_t size = ALIGN(count * pool->ptsize, PAGE_SIZE);

	BUG_ON(count == 0);

	if (get_order(size) >= MAX_ORDER) {
		KGSL_CORE_ERR("ptpool allocation is too big: %d\n", size);
		return -EINVAL;
	}

	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (chunk == NULL) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*chunk));
		return -ENOMEM;
	}

	chunk->size = size;
	chunk->count = count;
	chunk->dynamic = dynamic;

	chunk->data = dma_alloc_coherent(NULL, size,
					 &chunk->phys, GFP_KERNEL);

	if (chunk->data == NULL) {
		KGSL_CORE_ERR("dma_alloc_coherent(%d) failed\n", size);
		goto err;
	}

	chunk->bitmap = kzalloc(BITS_TO_LONGS(count) * 4, GFP_KERNEL);

	if (chunk->bitmap == NULL) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
			BITS_TO_LONGS(count) * 4);
		goto err_dma;
	}

	list_add_tail(&chunk->list, &pool->list);

	pool->chunks++;
	pool->entries += count;

	if (!dynamic)
		pool->static_entries += count;

	return 0;

err_dma:
	dma_free_coherent(NULL, chunk->size, chunk->data, chunk->phys);
err:
	kfree(chunk);
	return -ENOMEM;
}
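
/*
 * Editorial note: each chunk is one contiguous coherent allocation carved
 * into chunk->count slots of pool->ptsize bytes, with chunk->bitmap tracking
 * which slots are in use.  Slot "bit" lives at
 *
 *	chunk->data + bit * pool->ptsize   (CPU address)
 *	chunk->phys + bit * pool->ptsize   (bus/GPU address)
 *
 * which is how _kgsl_ptpool_get_entry() below computes the addresses it
 * returns.
 */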

static void *
_kgsl_ptpool_get_entry(struct kgsl_ptpool *pool, unsigned int *physaddr)
{
	struct kgsl_ptpool_chunk *chunk;

	list_for_each_entry(chunk, &pool->list, list) {
		int bit = find_first_zero_bit(chunk->bitmap, chunk->count);

		if (bit >= chunk->count)
			continue;

		set_bit(bit, chunk->bitmap);
		*physaddr = chunk->phys + (bit * pool->ptsize);

		return chunk->data + (bit * pool->ptsize);
	}

	return NULL;
}

/**
 * kgsl_ptpool_add
 * @pool: A pointer to a ptpool structure
 * @count: Number of entries to add
 *
 * Add static entries to the pagetable pool.
 */

int
kgsl_ptpool_add(struct kgsl_ptpool *pool, int count)
{
	int ret = 0;
	BUG_ON(count == 0);

	mutex_lock(&pool->lock);

	/* Only 4MB can be allocated in one chunk, so larger allocations
	   need to be split into multiple sections */

	while (count) {
		int entries = ((count * pool->ptsize) > SZ_4M) ?
			SZ_4M / pool->ptsize : count;

		/* Add the entries as static, i.e. they don't ever stand
		   a chance of being removed */

		ret = _kgsl_ptpool_add_entries(pool, entries, 0);
		if (ret)
			break;

		count -= entries;
	}

	mutex_unlock(&pool->lock);
	return ret;
}
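
/*
 * Worked example (illustrative only; the actual ptsize depends on the
 * pagetable configuration): with a 4096-byte pagetable, SZ_4M / ptsize is
 * 1024, so a request such as
 *
 *	kgsl_ptpool_add(&kgsl_driver.ptpool, 1500);
 *
 * is split into one static chunk of 1024 entries followed by a second chunk
 * of 476 entries.
 */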

/**
 * kgsl_ptpool_alloc
 * @pool: A pointer to a ptpool structure
 * @physaddr: A pointer in which to return the physical address of the
 * pagetable
 *
 * Allocate a pagetable from the pool.  Returns the virtual address
 * of the pagetable; the physical address is returned in physaddr.
 */

void *kgsl_ptpool_alloc(struct kgsl_ptpool *pool, unsigned int *physaddr)
{
	void *addr = NULL;
	int ret;

	mutex_lock(&pool->lock);
	addr = _kgsl_ptpool_get_entry(pool, physaddr);
	if (addr)
		goto done;

	/* Add a chunk for 1 more pagetable and mark it as dynamic */
	ret = _kgsl_ptpool_add_entries(pool, 1, 1);

	if (ret)
		goto done;

	addr = _kgsl_ptpool_get_entry(pool, physaddr);
done:
	mutex_unlock(&pool->lock);
	return addr;
}
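
/*
 * Usage sketch (mirrors what kgsl_mmu_createpagetableobject() does later in
 * this file):
 *
 *	unsigned int physaddr;
 *	void *hostptr = kgsl_ptpool_alloc(&kgsl_driver.ptpool, &physaddr);
 *
 *	if (hostptr == NULL)
 *		return -ENOMEM;
 *	...
 *	kgsl_ptpool_free(&kgsl_driver.ptpool, hostptr);
 */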

static inline void _kgsl_ptpool_rm_chunk(struct kgsl_ptpool_chunk *chunk)
{
	list_del(&chunk->list);

	if (chunk->data)
		dma_free_coherent(NULL, chunk->size, chunk->data,
			chunk->phys);
	kfree(chunk->bitmap);
	kfree(chunk);
}

/**
 * kgsl_ptpool_free
 * @pool: A pointer to a ptpool structure
 * @addr: The virtual address of the pagetable to free
 *
 * Free a pagetable allocated from the pool.
 */

void kgsl_ptpool_free(struct kgsl_ptpool *pool, void *addr)
{
	struct kgsl_ptpool_chunk *chunk, *tmp;

	if (pool == NULL || addr == NULL)
		return;

	mutex_lock(&pool->lock);
	list_for_each_entry_safe(chunk, tmp, &pool->list, list) {
		if (addr >= chunk->data &&
		    addr < chunk->data + chunk->size) {
			int bit = ((unsigned long) (addr - chunk->data)) /
				pool->ptsize;

			clear_bit(bit, chunk->bitmap);
			memset(addr, 0, pool->ptsize);

			if (chunk->dynamic &&
				bitmap_empty(chunk->bitmap, chunk->count))
				_kgsl_ptpool_rm_chunk(chunk);

			break;
		}
	}

	mutex_unlock(&pool->lock);
}

void kgsl_ptpool_destroy(struct kgsl_ptpool *pool)
{
	struct kgsl_ptpool_chunk *chunk, *tmp;

	if (pool == NULL)
		return;

	mutex_lock(&pool->lock);
	list_for_each_entry_safe(chunk, tmp, &pool->list, list)
		_kgsl_ptpool_rm_chunk(chunk);
	mutex_unlock(&pool->lock);

	memset(pool, 0, sizeof(*pool));
}

/**
 * kgsl_ptpool_init
 * @pool: A pointer to a ptpool structure to initialize
 * @ptsize: The size of each pagetable in the pool
 * @entries: The number of initial entries to add to the pool
 *
 * Initialize a pool and allocate an initial chunk of entries.
 */

int kgsl_ptpool_init(struct kgsl_ptpool *pool, int ptsize, int entries)
{
	int ret = 0;
	BUG_ON(ptsize == 0);

	pool->ptsize = ptsize;
	mutex_init(&pool->lock);
	INIT_LIST_HEAD(&pool->list);

	if (entries) {
		ret = kgsl_ptpool_add(pool, entries);
		if (ret)
			return ret;
	}

	return sysfs_create_group(kgsl_driver.ptkobj, &ptpool_attr_group);
}
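
/*
 * Usage sketch (the actual caller lives in the core driver setup, not in this
 * file; "initial_entries" below is an illustrative name, not a real symbol):
 *
 *	ret = kgsl_ptpool_init(&kgsl_driver.ptpool, KGSL_PAGETABLE_SIZE,
 *			       initial_entries);
 *	...
 *	kgsl_ptpool_destroy(&kgsl_driver.ptpool);
 */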

static int kgsl_cleanup_pt(struct kgsl_pagetable *pt)
{
	int i;
	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device)
			device->ftbl->cleanup_pt(device, pt);
	}
	return 0;
}

static void kgsl_destroy_pagetable(struct kref *kref)
{
	struct kgsl_pagetable *pagetable = container_of(kref,
		struct kgsl_pagetable, refcount);
	unsigned long flags;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_del(&pagetable->list);
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	pagetable_remove_sysfs_objects(pagetable);

	kgsl_cleanup_pt(pagetable);

	kgsl_ptpool_free(&kgsl_driver.ptpool, pagetable->base.hostptr);

	kgsl_driver.stats.coherent -= KGSL_PAGETABLE_SIZE;

	if (pagetable->pool)
		gen_pool_destroy(pagetable->pool);

	kfree(pagetable->tlbflushfilter.base);
	kfree(pagetable);
}

static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
{
	if (pagetable)
		kref_put(&pagetable->refcount, kgsl_destroy_pagetable);
}

static struct kgsl_pagetable *
kgsl_get_pagetable(unsigned long name)
{
	struct kgsl_pagetable *pt, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (pt->name == name) {
			ret = pt;
			kref_get(&ret->refcount);
			break;
		}
	}

	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
	return ret;
}

static struct kgsl_pagetable *
_get_pt_from_kobj(struct kobject *kobj)
{
	unsigned long ptname;

	if (!kobj)
		return NULL;

	if (sscanf(kobj->name, "%ld", &ptname) != 1)
		return NULL;

	return kgsl_get_pagetable(ptname);
}

static ssize_t
sysfs_show_entries(struct kobject *kobj,
		   struct kobj_attribute *attr,
		   char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += sprintf(buf, "%d\n", pt->stats.entries);

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_mapped(struct kobject *kobj,
		  struct kobj_attribute *attr,
		  char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += sprintf(buf, "%d\n", pt->stats.mapped);

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_va_range(struct kobject *kobj,
		    struct kobj_attribute *attr,
		    char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += sprintf(buf, "0x%x\n", pt->va_range);

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_max_mapped(struct kobject *kobj,
		      struct kobj_attribute *attr,
		      char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += sprintf(buf, "%d\n", pt->stats.max_mapped);

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_max_entries(struct kobject *kobj,
		       struct kobj_attribute *attr,
		       char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += sprintf(buf, "%d\n", pt->stats.max_entries);

	kgsl_put_pagetable(pt);
	return ret;
}

static struct kobj_attribute attr_entries = {
	.attr = { .name = "entries", .mode = 0444 },
	.show = sysfs_show_entries,
	.store = NULL,
};

static struct kobj_attribute attr_mapped = {
	.attr = { .name = "mapped", .mode = 0444 },
	.show = sysfs_show_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_va_range = {
	.attr = { .name = "va_range", .mode = 0444 },
	.show = sysfs_show_va_range,
	.store = NULL,
};

static struct kobj_attribute attr_max_mapped = {
	.attr = { .name = "max_mapped", .mode = 0444 },
	.show = sysfs_show_max_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_max_entries = {
	.attr = { .name = "max_entries", .mode = 0444 },
	.show = sysfs_show_max_entries,
	.store = NULL,
};

static struct attribute *pagetable_attrs[] = {
	&attr_entries.attr,
	&attr_mapped.attr,
	&attr_va_range.attr,
	&attr_max_mapped.attr,
	&attr_max_entries.attr,
	NULL,
};

static struct attribute_group pagetable_attr_group = {
	.attrs = pagetable_attrs,
};

static void
pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable)
{
	if (pagetable->kobj)
		sysfs_remove_group(pagetable->kobj,
				   &pagetable_attr_group);

	kobject_put(pagetable->kobj);
}

static int
pagetable_add_sysfs_objects(struct kgsl_pagetable *pagetable)
{
	char ptname[16];
	int ret = -ENOMEM;

	snprintf(ptname, sizeof(ptname), "%d", pagetable->name);
	pagetable->kobj = kobject_create_and_add(ptname,
						 kgsl_driver.ptkobj);
	if (pagetable->kobj == NULL)
		goto err;

	ret = sysfs_create_group(pagetable->kobj, &pagetable_attr_group);

err:
	if (ret) {
		if (pagetable->kobj)
			kobject_put(pagetable->kobj);

		pagetable->kobj = NULL;
	}

	return ret;
}

static inline uint32_t
kgsl_pt_entry_get(struct kgsl_pagetable *pt, uint32_t va)
{
	return (va - pt->va_base) >> PAGE_SHIFT;
}

static inline void
kgsl_pt_map_set(struct kgsl_pagetable *pt, uint32_t pte, uint32_t val)
{
	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;

	writel_relaxed(val, &baseptr[pte]);
}

static inline uint32_t
kgsl_pt_map_getaddr(struct kgsl_pagetable *pt, uint32_t pte)
{
	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
	uint32_t ret = readl_relaxed(&baseptr[pte]) & GSL_PT_PAGE_ADDR_MASK;
	return ret;
}
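
/*
 * Worked example (illustrative, assuming 4K pages): a GPU virtual address of
 * pt->va_base + 0x5000 falls in the sixth page of the range, so
 * kgsl_pt_entry_get() returns 0x5000 >> PAGE_SHIFT = 5, and the matching
 * hardware entry is the 32-bit word at &baseptr[5].  kgsl_pt_map_set()
 * writes "physaddr | protflags" into that word, and kgsl_pt_map_getaddr()
 * masks the protection bits back off with GSL_PT_PAGE_ADDR_MASK.
 */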

int
kgsl_get_ptname_from_ptbase(unsigned int pt_base)
{
	struct kgsl_pagetable *pt;
	int ptid = -1;

	spin_lock(&kgsl_driver.ptlock);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (pt_base == pt->base.gpuaddr) {
			ptid = (int) pt->name;
			break;
		}
	}
	spin_unlock(&kgsl_driver.ptlock);

	return ptid;
}

void kgsl_mmu_pagefault(struct kgsl_device *device)
{
	unsigned int reg;
	unsigned int ptbase;

	kgsl_regread(device, MH_MMU_PAGE_FAULT, &reg);
	kgsl_regread(device, MH_MMU_PT_BASE, &ptbase);

	KGSL_MEM_CRIT(device,
			"mmu page fault: page=0x%lx pt=%d op=%s axi=%d\n",
			reg & ~(PAGE_SIZE - 1),
			kgsl_get_ptname_from_ptbase(ptbase),
			reg & 0x02 ? "WRITE" : "READ", (reg >> 4) & 0xF);
}

void kgsl_mh_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0;
	unsigned int reg;

	kgsl_regread(device, MH_INTERRUPT_STATUS, &status);
	kgsl_regread(device, MH_AXI_ERROR, &reg);

	if (status & MH_INTERRUPT_MASK__AXI_READ_ERROR)
		KGSL_MEM_CRIT(device, "axi read error interrupt: %08x\n", reg);
	if (status & MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
		KGSL_MEM_CRIT(device, "axi write error interrupt: %08x\n", reg);
	if (status & MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
		kgsl_mmu_pagefault(device);

	status &= KGSL_MMU_INT_MASK;
	kgsl_regwrite(device, MH_INTERRUPT_CLEAR, status);
}
EXPORT_SYMBOL(kgsl_mh_intrcallback);

static int kgsl_setup_pt(struct kgsl_pagetable *pt)
{
	int i = 0;
	int status = 0;

	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device) {
			status = device->ftbl->setup_pt(device, pt);
			if (status)
				goto error_pt;
		}
	}
	return status;
error_pt:
	while (i >= 0) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device)
			device->ftbl->cleanup_pt(device, pt);
		i--;
	}
	return status;
}

static struct kgsl_pagetable *kgsl_mmu_createpagetableobject(
				unsigned int name)
{
	int status = 0;
	struct kgsl_pagetable *pagetable = NULL;
	unsigned long flags;

	pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
	if (pagetable == NULL) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
			sizeof(struct kgsl_pagetable));
		return NULL;
	}

	kref_init(&pagetable->refcount);

	spin_lock_init(&pagetable->lock);
	pagetable->tlb_flags = 0;
	pagetable->name = name;
	pagetable->va_base = KGSL_PAGETABLE_BASE;
	pagetable->va_range = CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
	pagetable->last_superpte = 0;
	pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(pagetable->va_range);

	pagetable->tlbflushfilter.size = (pagetable->va_range /
				(PAGE_SIZE * GSL_PT_SUPER_PTE * 8)) + 1;
	pagetable->tlbflushfilter.base = (unsigned int *)
			kzalloc(pagetable->tlbflushfilter.size, GFP_KERNEL);
	if (!pagetable->tlbflushfilter.base) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
			pagetable->tlbflushfilter.size);
		goto err_alloc;
	}
	GSL_TLBFLUSH_FILTER_RESET();

	pagetable->pool = gen_pool_create(PAGE_SHIFT, -1);
	if (pagetable->pool == NULL) {
		KGSL_CORE_ERR("gen_pool_create(%d) failed\n", PAGE_SHIFT);
		goto err_flushfilter;
	}

	if (gen_pool_add(pagetable->pool, pagetable->va_base,
				pagetable->va_range, -1)) {
		KGSL_CORE_ERR("gen_pool_add failed\n");
		goto err_pool;
	}

	pagetable->base.hostptr = kgsl_ptpool_alloc(&kgsl_driver.ptpool,
		&pagetable->base.physaddr);

	if (pagetable->base.hostptr == NULL)
		goto err_pool;

	/* ptpool allocations are from coherent memory, so update the
	   device statistics accordingly */

	KGSL_STATS_ADD(KGSL_PAGETABLE_SIZE, kgsl_driver.stats.coherent,
		       kgsl_driver.stats.coherent_max);

	pagetable->base.gpuaddr = pagetable->base.physaddr;
	pagetable->base.size = KGSL_PAGETABLE_SIZE;

	status = kgsl_setup_pt(pagetable);
	if (status)
		goto err_free_sharedmem;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_add(&pagetable->list, &kgsl_driver.pagetable_list);
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	/* Create the sysfs entries */
	pagetable_add_sysfs_objects(pagetable);

	return pagetable;

err_free_sharedmem:
	kgsl_ptpool_free(&kgsl_driver.ptpool, pagetable->base.hostptr);
err_pool:
	gen_pool_destroy(pagetable->pool);
err_flushfilter:
	kfree(pagetable->tlbflushfilter.base);
err_alloc:
	kfree(pagetable);

	return NULL;
}

struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name)
{
	struct kgsl_pagetable *pt;

	pt = kgsl_get_pagetable(name);

	if (pt == NULL)
		pt = kgsl_mmu_createpagetableobject(name);

	return pt;
}

void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
{
	kgsl_put_pagetable(pagetable);
}

void kgsl_default_setstate(struct kgsl_device *device, uint32_t flags)
{
	if (!kgsl_mmu_enabled())
		return;

	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
		kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);
		kgsl_regwrite(device, MH_MMU_PT_BASE,
			device->mmu.hwpagetable->base.gpuaddr);
	}

	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
		/* Invalidate all and tc */
		kgsl_regwrite(device, MH_MMU_INVALIDATE, 0x00000003);
	}
}
EXPORT_SYMBOL(kgsl_default_setstate);

void kgsl_setstate(struct kgsl_device *device, uint32_t flags)
{
	if (device->ftbl->setstate)
		device->ftbl->setstate(device, flags);
}
EXPORT_SYMBOL(kgsl_setstate);

void kgsl_mmu_setstate(struct kgsl_device *device,
			struct kgsl_pagetable *pagetable)
{
	struct kgsl_mmu *mmu = &device->mmu;

	if (mmu->flags & KGSL_FLAGS_STARTED) {
		/* If the specified pagetable is not already current,
		 * set up the MMU to use the new pagetable
		 */
		if (mmu->hwpagetable != pagetable) {
			mmu->hwpagetable = pagetable;
			spin_lock(&mmu->hwpagetable->lock);
			mmu->hwpagetable->tlb_flags &= ~(1<<device->id);
			spin_unlock(&mmu->hwpagetable->lock);

			/* call device specific set page table */
			kgsl_setstate(mmu->device, KGSL_MMUFLAGS_TLBFLUSH |
				KGSL_MMUFLAGS_PTUPDATE);
		}
	}
}
EXPORT_SYMBOL(kgsl_mmu_setstate);

int kgsl_mmu_init(struct kgsl_device *device)
{
	/*
	 * initialize device mmu
	 *
	 * call this with the global lock held
	 */
	int status = 0;
	struct kgsl_mmu *mmu = &device->mmu;

	mmu->device = device;

	/* make sure aligned to pagesize */
	BUG_ON(device->mh.mpu_base & (PAGE_SIZE - 1));
	BUG_ON((device->mh.mpu_base + device->mh.mpu_range) & (PAGE_SIZE - 1));

	/* sub-client MMU lookups require address translation */
	if ((mmu->config & ~0x1) > 0) {
		/* make sure virtual address range is a multiple of 64Kb */
		BUG_ON(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE & ((1 << 16) - 1));

		/* allocate memory used for completing r/w operations that
		 * cannot be mapped by the MMU
		 */
		status = kgsl_allocate_contiguous(&mmu->dummyspace, 64);
		if (!status)
			kgsl_sharedmem_set(&mmu->dummyspace, 0, 0,
					   mmu->dummyspace.size);
	}

	return status;
}

void kgsl_mh_start(struct kgsl_device *device)
{
	struct kgsl_mh *mh = &device->mh;
	/* force mmu off for now */
	kgsl_regwrite(device, MH_MMU_CONFIG, 0);
	kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);

	/* define physical memory range accessible by the core */
	kgsl_regwrite(device, MH_MMU_MPU_BASE, mh->mpu_base);
	kgsl_regwrite(device, MH_MMU_MPU_END,
			mh->mpu_base + mh->mpu_range);
	kgsl_regwrite(device, MH_ARBITER_CONFIG, mh->mharb);

	if (mh->mh_intf_cfg1 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG1,
				mh->mh_intf_cfg1);

	if (mh->mh_intf_cfg2 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG2,
				mh->mh_intf_cfg2);

	/*
	 * Interrupts are enabled on a per-device level when
	 * kgsl_pwrctrl_irq() is called
	 */
}

int kgsl_mmu_start(struct kgsl_device *device)
{
	/*
	 * initialize device mmu
	 *
	 * call this with the global lock held
	 */

	struct kgsl_mmu *mmu = &device->mmu;

	if (mmu->flags & KGSL_FLAGS_STARTED)
		return 0;
	/* setup MMU and sub-client behavior */
	kgsl_regwrite(device, MH_MMU_CONFIG, mmu->config);
	kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);

	kgsl_sharedmem_set(&mmu->dummyspace, 0, 0,
			   mmu->dummyspace.size);

	/* TRAN_ERROR needs a 32 byte (32 byte aligned) chunk of memory
	 * to complete transactions in case of an MMU fault.  Note that
	 * we'll leave the bottom 32 bytes of the dummyspace for other
	 * purposes (e.g. use it when dummy read cycles are needed
	 * for other blocks) */
	kgsl_regwrite(device, MH_MMU_TRAN_ERROR,
		      mmu->dummyspace.physaddr + 32);

	if (mmu->defaultpagetable == NULL)
		mmu->defaultpagetable =
			kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);

	/* Return error if the default pagetable doesn't exist */
	if (mmu->defaultpagetable == NULL)
		return -ENOMEM;

	mmu->hwpagetable = mmu->defaultpagetable;

	kgsl_regwrite(device, MH_MMU_PT_BASE,
		      mmu->hwpagetable->base.gpuaddr);
	kgsl_regwrite(device, MH_MMU_VA_RANGE,
		      (mmu->hwpagetable->va_base |
		      (mmu->hwpagetable->va_range >> 16)));
	kgsl_setstate(device, KGSL_MMUFLAGS_TLBFLUSH);
	mmu->flags |= KGSL_FLAGS_STARTED;
	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_start);

unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr)
{
	unsigned int physaddr = 0;
	pgd_t *pgd_ptr = NULL;
	pmd_t *pmd_ptr = NULL;
	pte_t *pte_ptr = NULL, pte;

	pgd_ptr = pgd_offset(current->mm, (unsigned long) virtaddr);
	if (pgd_none(*pgd_ptr) || pgd_bad(*pgd_ptr)) {
		KGSL_CORE_ERR("Invalid pgd entry\n");
		return 0;
	}

	pmd_ptr = pmd_offset(pgd_ptr, (unsigned long) virtaddr);
	if (pmd_none(*pmd_ptr) || pmd_bad(*pmd_ptr)) {
		KGSL_CORE_ERR("Invalid pmd entry\n");
		return 0;
	}

	pte_ptr = pte_offset_map(pmd_ptr, (unsigned long) virtaddr);
	if (!pte_ptr) {
		KGSL_CORE_ERR("pte_offset_map failed\n");
		return 0;
	}
	pte = *pte_ptr;
	physaddr = pte_pfn(pte);
	pte_unmap(pte_ptr);
	physaddr <<= PAGE_SHIFT;
	return physaddr;
}

int
kgsl_mmu_map(struct kgsl_pagetable *pagetable,
				struct kgsl_memdesc *memdesc,
				unsigned int protflags)
{
	int numpages;
	unsigned int pte, ptefirst, ptelast, physaddr;
	int flushtlb;
	unsigned int offset = 0;

	BUG_ON(protflags & ~(GSL_PT_PAGE_RV | GSL_PT_PAGE_WV));
	BUG_ON(protflags == 0);

	memdesc->gpuaddr = gen_pool_alloc_aligned(pagetable->pool,
		memdesc->size, KGSL_MMU_ALIGN_SHIFT);

	if (memdesc->gpuaddr == 0) {
		KGSL_CORE_ERR("gen_pool_alloc(%d) failed\n", memdesc->size);
		KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
				pagetable->name, pagetable->stats.mapped,
				pagetable->stats.entries);
		return -ENOMEM;
	}

	numpages = (memdesc->size >> PAGE_SHIFT);

	ptefirst = kgsl_pt_entry_get(pagetable, memdesc->gpuaddr);
	ptelast = ptefirst + numpages;

	pte = ptefirst;
	flushtlb = 0;

	/* tlb needs to be flushed when the first and last pte are not at
	 * superpte boundaries */
	if ((ptefirst & (GSL_PT_SUPER_PTE - 1)) != 0 ||
		((ptelast + 1) & (GSL_PT_SUPER_PTE - 1)) != 0)
		flushtlb = 1;

	spin_lock(&pagetable->lock);
	for (pte = ptefirst; pte < ptelast; pte++, offset += PAGE_SIZE) {
#ifdef VERBOSE_DEBUG
		/* check if PTE exists */
		uint32_t val = kgsl_pt_map_getaddr(pagetable, pte);
		BUG_ON(val != 0 && val != GSL_PT_PAGE_DIRTY);
#endif
		if ((pte & (GSL_PT_SUPER_PTE - 1)) == 0)
			if (GSL_TLBFLUSH_FILTER_ISDIRTY(pte / GSL_PT_SUPER_PTE))
				flushtlb = 1;
		/* mark pte as in use */

		physaddr = memdesc->ops->physaddr(memdesc, offset);
		BUG_ON(physaddr == 0);
		kgsl_pt_map_set(pagetable, pte, physaddr | protflags);
	}

	/* Keep track of the statistics for the sysfs files */

	KGSL_STATS_ADD(1, pagetable->stats.entries,
		       pagetable->stats.max_entries);

	KGSL_STATS_ADD(memdesc->size, pagetable->stats.mapped,
		       pagetable->stats.max_mapped);

	/* Post all writes to the pagetable */
	wmb();

	/* Invalidate tlb only if current page table used by GPU is the
	 * pagetable that we used to allocate */
	if (flushtlb) {
		/* set all devices as needing flushing */
		pagetable->tlb_flags = UINT_MAX;
		GSL_TLBFLUSH_FILTER_RESET();
	}
	spin_unlock(&pagetable->lock);

	return 0;
}
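
/*
 * Usage sketch (illustrative; a real caller checks the return value and
 * owns valid references to the pagetable and memdesc):
 *
 *	ret = kgsl_mmu_map(pagetable, &memdesc,
 *			   GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
 *	if (ret)
 *		return ret;
 *	...
 *	kgsl_mmu_unmap(pagetable, &memdesc);
 *
 * memdesc->gpuaddr is filled in by kgsl_mmu_map() from the pagetable's
 * gen_pool allocator, aligned to 1 << KGSL_MMU_ALIGN_SHIFT (8 KB).
 */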

int
kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
		struct kgsl_memdesc *memdesc)
{
	unsigned int numpages;
	unsigned int pte, ptefirst, ptelast, superpte;
	unsigned int range = memdesc->size;

	/* All GPU addresses as assigned are page aligned, but some
	   functions perturb the gpuaddr with an offset, so apply the
	   mask here to make sure we have the right address */

	unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;

	if (range == 0 || gpuaddr == 0)
		return 0;

	numpages = (range >> PAGE_SHIFT);
	if (range & (PAGE_SIZE - 1))
		numpages++;

	ptefirst = kgsl_pt_entry_get(pagetable, gpuaddr);
	ptelast = ptefirst + numpages;

	spin_lock(&pagetable->lock);
	superpte = ptefirst - (ptefirst & (GSL_PT_SUPER_PTE - 1));
	GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / GSL_PT_SUPER_PTE);
	for (pte = ptefirst; pte < ptelast; pte++) {
#ifdef VERBOSE_DEBUG
		/* check if PTE exists */
		BUG_ON(!kgsl_pt_map_getaddr(pagetable, pte));
#endif
		kgsl_pt_map_set(pagetable, pte, GSL_PT_PAGE_DIRTY);
		superpte = pte - (pte & (GSL_PT_SUPER_PTE - 1));
		if (pte == superpte)
			GSL_TLBFLUSH_FILTER_SETDIRTY(superpte /
				GSL_PT_SUPER_PTE);
	}

	/* Remove the statistics */
	pagetable->stats.entries--;
	pagetable->stats.mapped -= range;

	/* Post all writes to the pagetable */
	wmb();

	spin_unlock(&pagetable->lock);

	gen_pool_free(pagetable->pool, gpuaddr, range);

	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_unmap);

int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
			struct kgsl_memdesc *memdesc, unsigned int protflags)
{
	int result = -EINVAL;
	unsigned int gpuaddr = 0;

	if (memdesc == NULL) {
		KGSL_CORE_ERR("invalid memdesc\n");
		goto error;
	}

	gpuaddr = memdesc->gpuaddr;

	result = kgsl_mmu_map(pagetable, memdesc, protflags);
	if (result)
		goto error;

	/* global mappings must have the same gpu address in all pagetables */
	if (gpuaddr && gpuaddr != memdesc->gpuaddr) {
		KGSL_CORE_ERR("pt %p addr mismatch phys 0x%08x "
			"gpu 0x%08x 0x%08x", pagetable, memdesc->physaddr,
			gpuaddr, memdesc->gpuaddr);
		goto error_unmap;
	}
	return result;
error_unmap:
	kgsl_mmu_unmap(pagetable, memdesc);
error:
	return result;
}
EXPORT_SYMBOL(kgsl_mmu_map_global);

int kgsl_mmu_stop(struct kgsl_device *device)
{
	struct kgsl_mmu *mmu = &device->mmu;
	kgsl_regwrite(device, MH_MMU_CONFIG, 0x00000000);
	mmu->flags &= ~KGSL_FLAGS_STARTED;
	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_stop);

int kgsl_mmu_close(struct kgsl_device *device)
{
	/*
	 * close device mmu
	 *
	 * call this with the global lock held
	 */
	struct kgsl_mmu *mmu = &device->mmu;

	if (mmu->dummyspace.gpuaddr)
		kgsl_sharedmem_free(&mmu->dummyspace);

	if (mmu->defaultpagetable)
		kgsl_mmu_putpagetable(mmu->defaultpagetable);

	return 0;
}