/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "kgsl.h"
#include "kgsl_mmu.h"
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"

#define KGSL_MMU_ALIGN_SHIFT	13
#define KGSL_MMU_ALIGN_MASK	(~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))
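
/*
 * With KGSL_MMU_ALIGN_SHIFT == 13, GPU address allocations made in
 * kgsl_mmu_map() are aligned to 1 << 13 == 8KB. For example,
 * 0x00003456 & KGSL_MMU_ALIGN_MASK == 0x00002000.
 */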

#define GSL_PT_PAGE_BITS_MASK	0x00000007
#define GSL_PT_PAGE_ADDR_MASK	PAGE_MASK

static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);

static ssize_t
sysfs_show_ptpool_entries(struct kobject *kobj,
			  struct kobj_attribute *attr,
			  char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", kgsl_driver.ptpool.entries);
}

static ssize_t
sysfs_show_ptpool_min(struct kobject *kobj,
		      struct kobj_attribute *attr,
		      char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
			kgsl_driver.ptpool.static_entries);
}

static ssize_t
sysfs_show_ptpool_chunks(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", kgsl_driver.ptpool.chunks);
}

static ssize_t
sysfs_show_ptpool_ptsize(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", kgsl_driver.ptpool.ptsize);
}

static struct kobj_attribute attr_ptpool_entries = {
	.attr = { .name = "ptpool_entries", .mode = 0444 },
	.show = sysfs_show_ptpool_entries,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_min = {
	.attr = { .name = "ptpool_min", .mode = 0444 },
	.show = sysfs_show_ptpool_min,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_chunks = {
	.attr = { .name = "ptpool_chunks", .mode = 0444 },
	.show = sysfs_show_ptpool_chunks,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_ptsize = {
	.attr = { .name = "ptpool_ptsize", .mode = 0444 },
	.show = sysfs_show_ptpool_ptsize,
	.store = NULL,
};

static struct attribute *ptpool_attrs[] = {
	&attr_ptpool_entries.attr,
	&attr_ptpool_min.attr,
	&attr_ptpool_chunks.attr,
	&attr_ptpool_ptsize.attr,
	NULL,
};

static struct attribute_group ptpool_attr_group = {
	.attrs = ptpool_attrs,
};

static int
_kgsl_ptpool_add_entries(struct kgsl_ptpool *pool, int count, int dynamic)
{
	struct kgsl_ptpool_chunk *chunk;
	size_t size = ALIGN(count * pool->ptsize, PAGE_SIZE);

	BUG_ON(count == 0);

	if (get_order(size) >= MAX_ORDER) {
		KGSL_CORE_ERR("ptpool allocation is too big: %zu\n", size);
		return -EINVAL;
	}

	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (chunk == NULL) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n", sizeof(*chunk));
		return -ENOMEM;
	}

	chunk->size = size;
	chunk->count = count;
	chunk->dynamic = dynamic;

	chunk->data = dma_alloc_coherent(NULL, size,
					 &chunk->phys, GFP_KERNEL);

	if (chunk->data == NULL) {
		KGSL_CORE_ERR("dma_alloc_coherent(%zu) failed\n", size);
		goto err;
	}

	chunk->bitmap = kzalloc(BITS_TO_LONGS(count) * sizeof(unsigned long),
				GFP_KERNEL);

	if (chunk->bitmap == NULL) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n",
			      BITS_TO_LONGS(count) * sizeof(unsigned long));
		goto err_dma;
	}

	list_add_tail(&chunk->list, &pool->list);

	pool->chunks++;
	pool->entries += count;

	if (!dynamic)
		pool->static_entries += count;

	return 0;

err_dma:
	dma_free_coherent(NULL, chunk->size, chunk->data, chunk->phys);
err:
	kfree(chunk);
	return -ENOMEM;
}

static void *
_kgsl_ptpool_get_entry(struct kgsl_ptpool *pool, unsigned int *physaddr)
{
	struct kgsl_ptpool_chunk *chunk;

	list_for_each_entry(chunk, &pool->list, list) {
		int bit = find_first_zero_bit(chunk->bitmap, chunk->count);

		if (bit >= chunk->count)
			continue;

		set_bit(bit, chunk->bitmap);
		*physaddr = chunk->phys + (bit * pool->ptsize);

		return chunk->data + (bit * pool->ptsize);
	}

	return NULL;
}

/**
 * kgsl_ptpool_add
 * @pool: A pointer to a ptpool structure
 * @count: Number of entries to add
 *
 * Add static entries to the pagetable pool.
 */

int
kgsl_ptpool_add(struct kgsl_ptpool *pool, int count)
{
	int ret = 0;
	BUG_ON(count == 0);

	mutex_lock(&pool->lock);

	/* Only 4MB can be allocated in one chunk, so larger allocations
	   need to be split into multiple sections */

	while (count) {
		int entries = ((count * pool->ptsize) > SZ_4M) ?
			SZ_4M / pool->ptsize : count;

		/* Add the entries as static, i.e. they don't ever stand
		   a chance of being removed */

		ret = _kgsl_ptpool_add_entries(pool, entries, 0);
		if (ret)
			break;

		count -= entries;
	}

	mutex_unlock(&pool->lock);
	return ret;
}

/**
 * kgsl_ptpool_alloc
 * @pool: A pointer to a ptpool structure
 * @physaddr: A pointer in which to return the pagetable's physical address
 *
 * Allocate a pagetable from the pool. Returns the virtual address
 * of the pagetable; the physical address is returned in physaddr.
 */

void *kgsl_ptpool_alloc(struct kgsl_ptpool *pool, unsigned int *physaddr)
{
	void *addr = NULL;
	int ret;

	mutex_lock(&pool->lock);
	addr = _kgsl_ptpool_get_entry(pool, physaddr);
	if (addr)
		goto done;

	/* Add a chunk for 1 more pagetable and mark it as dynamic */
	ret = _kgsl_ptpool_add_entries(pool, 1, 1);

	if (ret)
		goto done;

	addr = _kgsl_ptpool_get_entry(pool, physaddr);
done:
	mutex_unlock(&pool->lock);
	return addr;
}

static inline void _kgsl_ptpool_rm_chunk(struct kgsl_ptpool_chunk *chunk)
{
	list_del(&chunk->list);

	if (chunk->data)
		dma_free_coherent(NULL, chunk->size, chunk->data,
				  chunk->phys);
	kfree(chunk->bitmap);
	kfree(chunk);
}

/**
 * kgsl_ptpool_free
 * @pool: A pointer to a ptpool structure
 * @addr: The virtual address to free
 *
 * Free a pagetable allocated from the pool.
 */

void kgsl_ptpool_free(struct kgsl_ptpool *pool, void *addr)
{
	struct kgsl_ptpool_chunk *chunk, *tmp;

	if (pool == NULL || addr == NULL)
		return;

	mutex_lock(&pool->lock);
	list_for_each_entry_safe(chunk, tmp, &pool->list, list) {
		if (addr >= chunk->data &&
		    addr < chunk->data + chunk->size) {
			int bit = ((unsigned long) (addr - chunk->data)) /
				pool->ptsize;

			clear_bit(bit, chunk->bitmap);
			memset(addr, 0, pool->ptsize);

			if (chunk->dynamic &&
			    bitmap_empty(chunk->bitmap, chunk->count))
				_kgsl_ptpool_rm_chunk(chunk);

			break;
		}
	}

	mutex_unlock(&pool->lock);
}

void kgsl_ptpool_destroy(struct kgsl_ptpool *pool)
{
	struct kgsl_ptpool_chunk *chunk, *tmp;

	if (pool == NULL)
		return;

	mutex_lock(&pool->lock);
	list_for_each_entry_safe(chunk, tmp, &pool->list, list)
		_kgsl_ptpool_rm_chunk(chunk);
	mutex_unlock(&pool->lock);

	memset(pool, 0, sizeof(*pool));
}

/**
 * kgsl_ptpool_init
 * @pool: A pointer to a ptpool structure to initialize
 * @ptsize: The size of each pagetable in the pool
 * @entries: The number of initial entries to add to the pool
 *
 * Initialize a pool and allocate an initial chunk of entries.
 */

int kgsl_ptpool_init(struct kgsl_ptpool *pool, int ptsize, int entries)
{
	int ret = 0;
	BUG_ON(ptsize == 0);

	pool->ptsize = ptsize;
	mutex_init(&pool->lock);
	INIT_LIST_HEAD(&pool->list);

	if (entries) {
		ret = kgsl_ptpool_add(pool, entries);
		if (ret)
			return ret;
	}

	return sysfs_create_group(kgsl_driver.ptkobj, &ptpool_attr_group);
}
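
/*
 * Typical pool lifecycle, as a sketch (error handling elided; the
 * 16-entry figure is illustrative, not a driver requirement):
 *
 *	struct kgsl_ptpool pool;
 *	unsigned int physaddr;
 *	void *pt;
 *
 *	kgsl_ptpool_init(&pool, KGSL_PAGETABLE_SIZE, 16);
 *	pt = kgsl_ptpool_alloc(&pool, &physaddr);
 *	... program the pagetable through pt, hand physaddr to the MMU ...
 *	kgsl_ptpool_free(&pool, pt);
 *	kgsl_ptpool_destroy(&pool);
 */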

static int kgsl_cleanup_pt(struct kgsl_pagetable *pt)
{
	int i;
	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device)
			device->ftbl->cleanup_pt(device, pt);
	}
	return 0;
}

static void kgsl_destroy_pagetable(struct kref *kref)
{
	struct kgsl_pagetable *pagetable = container_of(kref,
		struct kgsl_pagetable, refcount);
	unsigned long flags;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_del(&pagetable->list);
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	pagetable_remove_sysfs_objects(pagetable);

	kgsl_cleanup_pt(pagetable);

	kgsl_ptpool_free(&kgsl_driver.ptpool, pagetable->base.hostptr);

	kgsl_driver.stats.coherent -= KGSL_PAGETABLE_SIZE;

	if (pagetable->pool)
		gen_pool_destroy(pagetable->pool);

	kfree(pagetable->tlbflushfilter.base);
	kfree(pagetable);
}

static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
{
	if (pagetable)
		kref_put(&pagetable->refcount, kgsl_destroy_pagetable);
}

static struct kgsl_pagetable *
kgsl_get_pagetable(unsigned long name)
{
	struct kgsl_pagetable *pt, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (pt->name == name) {
			ret = pt;
			kref_get(&ret->refcount);
			break;
		}
	}

	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
	return ret;
}

static struct kgsl_pagetable *
_get_pt_from_kobj(struct kobject *kobj)
{
	unsigned long ptname;

	if (!kobj)
		return NULL;

	if (sscanf(kobj->name, "%lu", &ptname) != 1)
		return NULL;

	return kgsl_get_pagetable(ptname);
}

static ssize_t
sysfs_show_entries(struct kobject *kobj,
		   struct kobj_attribute *attr,
		   char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.entries);

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_mapped(struct kobject *kobj,
		  struct kobj_attribute *attr,
		  char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.mapped);

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_va_range(struct kobject *kobj,
		    struct kobj_attribute *attr,
		    char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += snprintf(buf, PAGE_SIZE, "0x%x\n", pt->va_range);

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_max_mapped(struct kobject *kobj,
		      struct kobj_attribute *attr,
		      char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_mapped);

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_max_entries(struct kobject *kobj,
		       struct kobj_attribute *attr,
		       char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_entries);

	kgsl_put_pagetable(pt);
	return ret;
}

static struct kobj_attribute attr_entries = {
	.attr = { .name = "entries", .mode = 0444 },
	.show = sysfs_show_entries,
	.store = NULL,
};

static struct kobj_attribute attr_mapped = {
	.attr = { .name = "mapped", .mode = 0444 },
	.show = sysfs_show_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_va_range = {
	.attr = { .name = "va_range", .mode = 0444 },
	.show = sysfs_show_va_range,
	.store = NULL,
};

static struct kobj_attribute attr_max_mapped = {
	.attr = { .name = "max_mapped", .mode = 0444 },
	.show = sysfs_show_max_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_max_entries = {
	.attr = { .name = "max_entries", .mode = 0444 },
	.show = sysfs_show_max_entries,
	.store = NULL,
};

static struct attribute *pagetable_attrs[] = {
	&attr_entries.attr,
	&attr_mapped.attr,
	&attr_va_range.attr,
	&attr_max_mapped.attr,
	&attr_max_entries.attr,
	NULL,
};

static struct attribute_group pagetable_attr_group = {
	.attrs = pagetable_attrs,
};

static void
pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable)
{
	if (pagetable->kobj)
		sysfs_remove_group(pagetable->kobj,
				   &pagetable_attr_group);

	kobject_put(pagetable->kobj);
}

static int
pagetable_add_sysfs_objects(struct kgsl_pagetable *pagetable)
{
	char ptname[16];
	int ret = -ENOMEM;

	snprintf(ptname, sizeof(ptname), "%d", pagetable->name);
	pagetable->kobj = kobject_create_and_add(ptname,
						 kgsl_driver.ptkobj);
	if (pagetable->kobj == NULL)
		goto err;

	ret = sysfs_create_group(pagetable->kobj, &pagetable_attr_group);

err:
	if (ret) {
		if (pagetable->kobj)
			kobject_put(pagetable->kobj);

		pagetable->kobj = NULL;
	}

	return ret;
}

static inline uint32_t
kgsl_pt_entry_get(struct kgsl_pagetable *pt, uint32_t va)
{
	return (va - pt->va_base) >> PAGE_SHIFT;
}

static inline void
kgsl_pt_map_set(struct kgsl_pagetable *pt, uint32_t pte, uint32_t val)
{
	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;

	writel_relaxed(val, &baseptr[pte]);
}

static inline uint32_t
kgsl_pt_map_getaddr(struct kgsl_pagetable *pt, uint32_t pte)
{
	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
	uint32_t ret = readl_relaxed(&baseptr[pte]) & GSL_PT_PAGE_ADDR_MASK;
	return ret;
}
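
/*
 * Worked example: with 4KB pages (PAGE_SHIFT == 12), a GPU address of
 * pt->va_base + 0x5000 resolves to pte index (0x5000 >> 12) == 5, so
 * kgsl_pt_map_set(pt, 5, physaddr | protflags) writes the 32-bit entry
 * at index 5 of the linear pagetable.
 */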

int
kgsl_get_ptname_from_ptbase(unsigned int pt_base)
{
	struct kgsl_pagetable *pt;
	int ptid = -1;

	spin_lock(&kgsl_driver.ptlock);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (pt_base == pt->base.gpuaddr) {
			ptid = (int) pt->name;
			break;
		}
	}
	spin_unlock(&kgsl_driver.ptlock);

	return ptid;
}

void kgsl_mmu_pagefault(struct kgsl_device *device)
{
	unsigned int reg;
	unsigned int ptbase;

	kgsl_regread(device, MH_MMU_PAGE_FAULT, &reg);
	kgsl_regread(device, MH_MMU_PT_BASE, &ptbase);

	KGSL_MEM_CRIT(device,
		      "mmu page fault: page=0x%lx pt=%d op=%s axi=%d\n",
		      reg & ~(PAGE_SIZE - 1),
		      kgsl_get_ptname_from_ptbase(ptbase),
		      reg & 0x02 ? "WRITE" : "READ", (reg >> 4) & 0xF);
}
627
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700628void kgsl_mh_intrcallback(struct kgsl_device *device)
629{
630 unsigned int status = 0;
631 unsigned int reg;
632
633 kgsl_regread(device, MH_INTERRUPT_STATUS, &status);
634 kgsl_regread(device, MH_AXI_ERROR, &reg);
635
636 if (status & MH_INTERRUPT_MASK__AXI_READ_ERROR)
637 KGSL_MEM_CRIT(device, "axi read error interrupt: %08x\n", reg);
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600638 if (status & MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700639 KGSL_MEM_CRIT(device, "axi write error interrupt: %08x\n", reg);
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600640 if (status & MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
641 kgsl_mmu_pagefault(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700642
Jordan Crousec8c9fcd2011-07-28 08:37:58 -0600643 status &= KGSL_MMU_INT_MASK;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700644 kgsl_regwrite(device, MH_INTERRUPT_CLEAR, status);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700645}
646EXPORT_SYMBOL(kgsl_mh_intrcallback);

static int kgsl_setup_pt(struct kgsl_pagetable *pt)
{
	int i = 0;
	int status = 0;

	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device) {
			status = device->ftbl->setup_pt(device, pt);
			if (status)
				goto error_pt;
		}
	}
	return status;
error_pt:
	while (i >= 0) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device)
			device->ftbl->cleanup_pt(device, pt);
		i--;
	}
	return status;
}

static struct kgsl_pagetable *kgsl_mmu_createpagetableobject(
	unsigned int name)
{
	int status = 0;
	struct kgsl_pagetable *pagetable = NULL;
	unsigned long flags;

	pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
	if (pagetable == NULL) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n",
			      sizeof(struct kgsl_pagetable));
		return NULL;
	}

	kref_init(&pagetable->refcount);

	spin_lock_init(&pagetable->lock);
	pagetable->tlb_flags = 0;
	pagetable->name = name;
	pagetable->va_base = KGSL_PAGETABLE_BASE;
	pagetable->va_range = CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
	pagetable->last_superpte = 0;
	pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(pagetable->va_range);

	pagetable->tlbflushfilter.size = (pagetable->va_range /
		(PAGE_SIZE * GSL_PT_SUPER_PTE * 8)) + 1;
	pagetable->tlbflushfilter.base = (unsigned int *)
		kzalloc(pagetable->tlbflushfilter.size, GFP_KERNEL);
	if (!pagetable->tlbflushfilter.base) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
			      pagetable->tlbflushfilter.size);
		goto err_alloc;
	}
	GSL_TLBFLUSH_FILTER_RESET();

	pagetable->pool = gen_pool_create(PAGE_SHIFT, -1);
	if (pagetable->pool == NULL) {
		KGSL_CORE_ERR("gen_pool_create(%d) failed\n", PAGE_SHIFT);
		goto err_flushfilter;
	}

	if (gen_pool_add(pagetable->pool, pagetable->va_base,
			 pagetable->va_range, -1)) {
		KGSL_CORE_ERR("gen_pool_add failed\n");
		goto err_pool;
	}

	pagetable->base.hostptr = kgsl_ptpool_alloc(&kgsl_driver.ptpool,
						    &pagetable->base.physaddr);

	if (pagetable->base.hostptr == NULL)
		goto err_pool;

	/* ptpool allocations are from coherent memory, so update the
	   device statistics accordingly */

	KGSL_STATS_ADD(KGSL_PAGETABLE_SIZE, kgsl_driver.stats.coherent,
		       kgsl_driver.stats.coherent_max);

	pagetable->base.gpuaddr = pagetable->base.physaddr;
	pagetable->base.size = KGSL_PAGETABLE_SIZE;

	status = kgsl_setup_pt(pagetable);
	if (status)
		goto err_free_sharedmem;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_add(&pagetable->list, &kgsl_driver.pagetable_list);
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	/* Create the sysfs entries */
	pagetable_add_sysfs_objects(pagetable);

	return pagetable;

err_free_sharedmem:
	kgsl_ptpool_free(&kgsl_driver.ptpool, pagetable->base.hostptr);
err_pool:
	gen_pool_destroy(pagetable->pool);
err_flushfilter:
	kfree(pagetable->tlbflushfilter.base);
err_alloc:
	kfree(pagetable);

	return NULL;
}

struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name)
{
	struct kgsl_pagetable *pt;

	pt = kgsl_get_pagetable(name);

	if (pt == NULL)
		pt = kgsl_mmu_createpagetableobject(name);

	return pt;
}

void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
{
	kgsl_put_pagetable(pagetable);
}

void kgsl_default_setstate(struct kgsl_device *device, uint32_t flags)
{
	if (!kgsl_mmu_enabled())
		return;

	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
		kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);
		kgsl_regwrite(device, MH_MMU_PT_BASE,
			      device->mmu.hwpagetable->base.gpuaddr);
	}

	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
		/* Invalidate all and tc */
		kgsl_regwrite(device, MH_MMU_INVALIDATE, 0x00000003);
	}
}
EXPORT_SYMBOL(kgsl_default_setstate);

void kgsl_setstate(struct kgsl_device *device, uint32_t flags)
{
	if (device->ftbl->setstate)
		device->ftbl->setstate(device, flags);
}
EXPORT_SYMBOL(kgsl_setstate);

void kgsl_mmu_setstate(struct kgsl_device *device,
		       struct kgsl_pagetable *pagetable)
{
	struct kgsl_mmu *mmu = &device->mmu;

	if (mmu->flags & KGSL_FLAGS_STARTED) {
		/* If the specified pagetable is not already current,
		 * set up the MMU to use it
		 */
		if (mmu->hwpagetable != pagetable) {
			mmu->hwpagetable = pagetable;
			spin_lock(&mmu->hwpagetable->lock);
			mmu->hwpagetable->tlb_flags &= ~(1<<device->id);
			spin_unlock(&mmu->hwpagetable->lock);

			/* call device specific set page table */
			kgsl_setstate(mmu->device, KGSL_MMUFLAGS_TLBFLUSH |
				      KGSL_MMUFLAGS_PTUPDATE);
		}
	}
}
EXPORT_SYMBOL(kgsl_mmu_setstate);

int kgsl_mmu_init(struct kgsl_device *device)
{
	/*
	 * initialize device mmu
	 *
	 * call this with the global lock held
	 */
	int status = 0;
	struct kgsl_mmu *mmu = &device->mmu;

	mmu->device = device;

	/* make sure aligned to pagesize */
	BUG_ON(device->mh.mpu_base & (PAGE_SIZE - 1));
	BUG_ON((device->mh.mpu_base + device->mh.mpu_range) & (PAGE_SIZE - 1));

	/* sub-client MMU lookups require address translation */
	if ((mmu->config & ~0x1) > 0) {
		/* make sure the virtual address range is a multiple of 64KB */
		BUG_ON(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE & ((1 << 16) - 1));

		/* allocate memory used for completing r/w operations that
		 * cannot be mapped by the MMU
		 */
		status = kgsl_allocate_contiguous(&mmu->dummyspace, 64);
		if (!status)
			kgsl_sharedmem_set(&mmu->dummyspace, 0, 0,
					   mmu->dummyspace.size);
	}

	return status;
}

void kgsl_mh_start(struct kgsl_device *device)
{
	struct kgsl_mh *mh = &device->mh;
	/* force the MMU off for now */
	kgsl_regwrite(device, MH_MMU_CONFIG, 0);
	kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);

	/* define physical memory range accessible by the core */
	kgsl_regwrite(device, MH_MMU_MPU_BASE, mh->mpu_base);
	kgsl_regwrite(device, MH_MMU_MPU_END,
		      mh->mpu_base + mh->mpu_range);
	kgsl_regwrite(device, MH_ARBITER_CONFIG, mh->mharb);

	if (mh->mh_intf_cfg1 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG1,
			      mh->mh_intf_cfg1);

	if (mh->mh_intf_cfg2 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG2,
			      mh->mh_intf_cfg2);

	/*
	 * Interrupts are enabled on a per-device level when
	 * kgsl_pwrctrl_irq() is called
	 */
}

int kgsl_mmu_start(struct kgsl_device *device)
{
	/*
	 * initialize device mmu
	 *
	 * call this with the global lock held
	 */

	struct kgsl_mmu *mmu = &device->mmu;

	if (mmu->flags & KGSL_FLAGS_STARTED)
		return 0;
	/* setup MMU and sub-client behavior */
	kgsl_regwrite(device, MH_MMU_CONFIG, mmu->config);
	kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);

	kgsl_sharedmem_set(&mmu->dummyspace, 0, 0,
			   mmu->dummyspace.size);

	/* TRAN_ERROR needs a 32 byte (32 byte aligned) chunk of memory
	 * to complete transactions in case of an MMU fault. Note that
	 * we'll leave the bottom 32 bytes of the dummyspace for other
	 * purposes (e.g. use it when dummy read cycles are needed
	 * for other blocks) */
	kgsl_regwrite(device, MH_MMU_TRAN_ERROR,
		      mmu->dummyspace.physaddr + 32);

	if (mmu->defaultpagetable == NULL)
		mmu->defaultpagetable =
			kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);

	/* Return error if the default pagetable doesn't exist */
	if (mmu->defaultpagetable == NULL)
		return -ENOMEM;

	mmu->hwpagetable = mmu->defaultpagetable;

	kgsl_regwrite(device, MH_MMU_PT_BASE,
		      mmu->hwpagetable->base.gpuaddr);
	kgsl_regwrite(device, MH_MMU_VA_RANGE,
		      (mmu->hwpagetable->va_base |
		       (mmu->hwpagetable->va_range >> 16)));
	kgsl_setstate(device, KGSL_MMUFLAGS_TLBFLUSH);
	mmu->flags |= KGSL_FLAGS_STARTED;
	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_start);

unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr)
{
	unsigned int physaddr = 0;
	pgd_t *pgd_ptr = NULL;
	pmd_t *pmd_ptr = NULL;
	pte_t *pte_ptr = NULL, pte;

	pgd_ptr = pgd_offset(current->mm, (unsigned long) virtaddr);
	if (pgd_none(*pgd_ptr) || pgd_bad(*pgd_ptr)) {
		KGSL_CORE_ERR("Invalid pgd entry\n");
		return 0;
	}

	pmd_ptr = pmd_offset(pgd_ptr, (unsigned long) virtaddr);
	if (pmd_none(*pmd_ptr) || pmd_bad(*pmd_ptr)) {
		KGSL_CORE_ERR("Invalid pmd entry\n");
		return 0;
	}

	pte_ptr = pte_offset_map(pmd_ptr, (unsigned long) virtaddr);
	if (!pte_ptr) {
		KGSL_CORE_ERR("pte_offset_map failed\n");
		return 0;
	}
	pte = *pte_ptr;
	physaddr = pte_pfn(pte);
	pte_unmap(pte_ptr);
	physaddr <<= PAGE_SHIFT;
	return physaddr;
}
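
/*
 * Usage sketch: vaddr is assumed to be a valid, currently-mapped address
 * in the calling process; a return of 0 means the table walk failed:
 *
 *	unsigned int pa = kgsl_virtaddr_to_physaddr(vaddr);
 *	if (pa == 0)
 *		return -EFAULT;
 */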

int
kgsl_mmu_map(struct kgsl_pagetable *pagetable,
	     struct kgsl_memdesc *memdesc,
	     unsigned int protflags)
{
	int numpages;
	unsigned int pte, ptefirst, ptelast, physaddr;
	int flushtlb;
	unsigned int offset = 0;

	BUG_ON(protflags & ~(GSL_PT_PAGE_RV | GSL_PT_PAGE_WV));
	BUG_ON(protflags == 0);

	memdesc->gpuaddr = gen_pool_alloc_aligned(pagetable->pool,
		memdesc->size, KGSL_MMU_ALIGN_SHIFT);

	if (memdesc->gpuaddr == 0) {
		KGSL_CORE_ERR("gen_pool_alloc(%d) failed\n", memdesc->size);
		KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
			      pagetable->name, pagetable->stats.mapped,
			      pagetable->stats.entries);
		return -ENOMEM;
	}

	numpages = (memdesc->size >> PAGE_SHIFT);

	ptefirst = kgsl_pt_entry_get(pagetable, memdesc->gpuaddr);
	ptelast = ptefirst + numpages;

	pte = ptefirst;
	flushtlb = 0;

	/* the tlb needs to be flushed when the first and last pte are not at
	 * superpte boundaries */
	if ((ptefirst & (GSL_PT_SUPER_PTE - 1)) != 0 ||
	    ((ptelast + 1) & (GSL_PT_SUPER_PTE - 1)) != 0)
		flushtlb = 1;

	spin_lock(&pagetable->lock);
	for (pte = ptefirst; pte < ptelast; pte++, offset += PAGE_SIZE) {
#ifdef VERBOSE_DEBUG
		/* check if PTE exists */
		uint32_t val = kgsl_pt_map_getaddr(pagetable, pte);
		BUG_ON(val != 0 && val != GSL_PT_PAGE_DIRTY);
#endif
		if ((pte & (GSL_PT_SUPER_PTE - 1)) == 0)
			if (GSL_TLBFLUSH_FILTER_ISDIRTY(pte / GSL_PT_SUPER_PTE))
				flushtlb = 1;
		/* mark pte as in use */

		physaddr = memdesc->ops->physaddr(memdesc, offset);
		BUG_ON(physaddr == 0);
		kgsl_pt_map_set(pagetable, pte, physaddr | protflags);
	}

	/* Keep track of the statistics for the sysfs files */

	KGSL_STATS_ADD(1, pagetable->stats.entries,
		       pagetable->stats.max_entries);

	KGSL_STATS_ADD(memdesc->size, pagetable->stats.mapped,
		       pagetable->stats.max_mapped);

	/* Post all writes to the pagetable */
	wmb();

	/* Invalidate the tlb if any superpte group touched here was marked
	 * dirty by a previous unmap, or if the mapping is not aligned to
	 * superpte boundaries */
	if (flushtlb) {
		/* set all devices as needing flushing */
		pagetable->tlb_flags = UINT_MAX;
		GSL_TLBFLUSH_FILTER_RESET();
	}
	spin_unlock(&pagetable->lock);

	return 0;
}

int
kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
	       struct kgsl_memdesc *memdesc)
{
	unsigned int numpages;
	unsigned int pte, ptefirst, ptelast, superpte;
	unsigned int range = memdesc->size;

	/* All GPU addresses as assigned are page aligned, but some
	   functions perturb the gpuaddr with an offset, so apply the
	   mask here to make sure we have the right address */

	unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;

	if (range == 0 || gpuaddr == 0)
		return 0;

	numpages = (range >> PAGE_SHIFT);
	if (range & (PAGE_SIZE - 1))
		numpages++;

	ptefirst = kgsl_pt_entry_get(pagetable, gpuaddr);
	ptelast = ptefirst + numpages;

	spin_lock(&pagetable->lock);
	superpte = ptefirst - (ptefirst & (GSL_PT_SUPER_PTE - 1));
	GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / GSL_PT_SUPER_PTE);
	for (pte = ptefirst; pte < ptelast; pte++) {
#ifdef VERBOSE_DEBUG
		/* check if PTE exists */
		BUG_ON(!kgsl_pt_map_getaddr(pagetable, pte));
#endif
		kgsl_pt_map_set(pagetable, pte, GSL_PT_PAGE_DIRTY);
		superpte = pte - (pte & (GSL_PT_SUPER_PTE - 1));
		if (pte == superpte)
			GSL_TLBFLUSH_FILTER_SETDIRTY(superpte /
				GSL_PT_SUPER_PTE);
	}

	/* Remove the statistics */
	pagetable->stats.entries--;
	pagetable->stats.mapped -= range;

	/* Post all writes to the pagetable */
	wmb();

	spin_unlock(&pagetable->lock);

	gen_pool_free(pagetable->pool, gpuaddr, range);

	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_unmap);
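
/*
 * Map/unmap pairing, as a sketch (assumes memdesc was set up by one of
 * the kgsl_sharedmem allocators so that memdesc->ops->physaddr works):
 *
 *	ret = kgsl_mmu_map(pt, &memdesc, GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
 *	if (!ret) {
 *		... use memdesc.gpuaddr on the GPU ...
 *		kgsl_mmu_unmap(pt, &memdesc);
 *	}
 */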

int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
			struct kgsl_memdesc *memdesc, unsigned int protflags)
{
	int result = -EINVAL;
	unsigned int gpuaddr = 0;

	if (memdesc == NULL) {
		KGSL_CORE_ERR("invalid memdesc\n");
		goto error;
	}

	gpuaddr = memdesc->gpuaddr;

	result = kgsl_mmu_map(pagetable, memdesc, protflags);
	if (result)
		goto error;

	/* global mappings must have the same gpu address in all pagetables */
	if (gpuaddr && gpuaddr != memdesc->gpuaddr) {
		KGSL_CORE_ERR("pt %p addr mismatch phys 0x%08x "
			      "gpu 0x%08x 0x%08x\n", pagetable,
			      memdesc->physaddr, gpuaddr, memdesc->gpuaddr);
		goto error_unmap;
	}
	return result;
error_unmap:
	kgsl_mmu_unmap(pagetable, memdesc);
error:
	return result;
}
EXPORT_SYMBOL(kgsl_mmu_map_global);

int kgsl_mmu_stop(struct kgsl_device *device)
{
	struct kgsl_mmu *mmu = &device->mmu;
	kgsl_regwrite(device, MH_MMU_CONFIG, 0x00000000);
	mmu->flags &= ~KGSL_FLAGS_STARTED;
	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_stop);

int kgsl_mmu_close(struct kgsl_device *device)
{
	/*
	 * close device mmu
	 *
	 * call this with the global lock held
	 */
	struct kgsl_mmu *mmu = &device->mmu;

	if (mmu->dummyspace.gpuaddr)
		kgsl_sharedmem_free(&mmu->dummyspace);

	if (mmu->defaultpagetable)
		kgsl_mmu_putpagetable(mmu->defaultpagetable);

	return 0;
}