/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "kgsl.h"
#include "kgsl_mmu.h"
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"

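/*
 * Read-only sysfs attributes exposing pagetable pool statistics:
 * total entries, static entries, chunk count and pagetable size.
 */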
static ssize_t
sysfs_show_ptpool_entries(struct kobject *kobj,
			  struct kobj_attribute *attr,
			  char *buf)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
					kgsl_driver.ptpool;
	return snprintf(buf, PAGE_SIZE, "%d\n", pool->entries);
}

static ssize_t
sysfs_show_ptpool_min(struct kobject *kobj,
		      struct kobj_attribute *attr,
		      char *buf)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
					kgsl_driver.ptpool;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			pool->static_entries);
}

static ssize_t
sysfs_show_ptpool_chunks(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 char *buf)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
					kgsl_driver.ptpool;
	return snprintf(buf, PAGE_SIZE, "%d\n", pool->chunks);
}

static ssize_t
sysfs_show_ptpool_ptsize(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 char *buf)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
					kgsl_driver.ptpool;
	return snprintf(buf, PAGE_SIZE, "%d\n", pool->ptsize);
}

static struct kobj_attribute attr_ptpool_entries = {
	.attr = { .name = "ptpool_entries", .mode = 0444 },
	.show = sysfs_show_ptpool_entries,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_min = {
	.attr = { .name = "ptpool_min", .mode = 0444 },
	.show = sysfs_show_ptpool_min,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_chunks = {
	.attr = { .name = "ptpool_chunks", .mode = 0444 },
	.show = sysfs_show_ptpool_chunks,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_ptsize = {
	.attr = { .name = "ptpool_ptsize", .mode = 0444 },
	.show = sysfs_show_ptpool_ptsize,
	.store = NULL,
};

static struct attribute *ptpool_attrs[] = {
	&attr_ptpool_entries.attr,
	&attr_ptpool_min.attr,
	&attr_ptpool_chunks.attr,
	&attr_ptpool_ptsize.attr,
	NULL,
};

static struct attribute_group ptpool_attr_group = {
	.attrs = ptpool_attrs,
};

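/*
 * Allocate one chunk of coherent memory holding 'count' pagetables,
 * create its allocation bitmap and add it to the pool. Called with
 * pool->lock held.
 */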
static int
_kgsl_ptpool_add_entries(struct kgsl_ptpool *pool, int count, int dynamic)
{
	struct kgsl_ptpool_chunk *chunk;
	size_t size = ALIGN(count * pool->ptsize, PAGE_SIZE);

	BUG_ON(count == 0);

	if (get_order(size) >= MAX_ORDER) {
		KGSL_CORE_ERR("ptpool allocation is too big: %zu\n", size);
		return -EINVAL;
	}

	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (chunk == NULL) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n", sizeof(*chunk));
		return -ENOMEM;
	}

	chunk->size = size;
	chunk->count = count;
	chunk->dynamic = dynamic;

	chunk->data = dma_alloc_coherent(NULL, size,
					 &chunk->phys, GFP_KERNEL);

	if (chunk->data == NULL) {
		KGSL_CORE_ERR("dma_alloc_coherent(%zu) failed\n", size);
		goto err;
	}

	chunk->bitmap = kzalloc(BITS_TO_LONGS(count) * 4, GFP_KERNEL);

	if (chunk->bitmap == NULL) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
			BITS_TO_LONGS(count) * 4);
		goto err_dma;
	}

	list_add_tail(&chunk->list, &pool->list);

	pool->chunks++;
	pool->entries += count;

	if (!dynamic)
		pool->static_entries += count;

	return 0;

err_dma:
	dma_free_coherent(NULL, chunk->size, chunk->data, chunk->phys);
err:
	kfree(chunk);
	return -ENOMEM;
}

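/*
 * Find a free pagetable slot in the pool, mark it used and return its
 * kernel virtual address; the physical address is returned through
 * 'physaddr'. Returns NULL if every chunk is full. Called with
 * pool->lock held.
 */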
static void *
_kgsl_ptpool_get_entry(struct kgsl_ptpool *pool, unsigned int *physaddr)
{
	struct kgsl_ptpool_chunk *chunk;

	list_for_each_entry(chunk, &pool->list, list) {
		int bit = find_first_zero_bit(chunk->bitmap, chunk->count);

		if (bit >= chunk->count)
			continue;

		set_bit(bit, chunk->bitmap);
		*physaddr = chunk->phys + (bit * pool->ptsize);

		return chunk->data + (bit * pool->ptsize);
	}

	return NULL;
}

/**
 * kgsl_ptpool_add
 * @pool: A pointer to a ptpool structure
 * @count: Number of entries to add
 *
 * Add static entries to the pagetable pool.
 */

static int
kgsl_ptpool_add(struct kgsl_ptpool *pool, int count)
{
	int ret = 0;
	BUG_ON(count == 0);

	mutex_lock(&pool->lock);

	/* Only 4MB can be allocated in one chunk, so larger allocations
	   need to be split into multiple sections */

	while (count) {
		int entries = ((count * pool->ptsize) > SZ_4M) ?
			SZ_4M / pool->ptsize : count;

		/* Add the entries as static, i.e. they don't ever stand
		   a chance of being removed */

		ret = _kgsl_ptpool_add_entries(pool, entries, 0);
		if (ret)
			break;

		count -= entries;
	}

	mutex_unlock(&pool->lock);
	return ret;
}

/**
 * kgsl_ptpool_alloc
 * @pool: A pointer to a ptpool structure
 * @physaddr: A pointer to store the physical address of the pagetable
 *
 * Allocate a pagetable from the pool. Returns the virtual address
 * of the pagetable; the physical address is returned in physaddr.
 */

static void *kgsl_ptpool_alloc(struct kgsl_ptpool *pool,
			       unsigned int *physaddr)
{
	void *addr = NULL;
	int ret;

	mutex_lock(&pool->lock);
	addr = _kgsl_ptpool_get_entry(pool, physaddr);
	if (addr)
		goto done;

	/* Add a chunk for 1 more pagetable and mark it as dynamic */
	ret = _kgsl_ptpool_add_entries(pool, 1, 1);

	if (ret)
		goto done;

	addr = _kgsl_ptpool_get_entry(pool, physaddr);
done:
	mutex_unlock(&pool->lock);
	return addr;
}

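/*
 * Unlink a chunk from the pool and release its coherent memory,
 * allocation bitmap and bookkeeping structure. Called with pool->lock
 * held.
 */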
static inline void _kgsl_ptpool_rm_chunk(struct kgsl_ptpool_chunk *chunk)
{
	list_del(&chunk->list);

	if (chunk->data)
		dma_free_coherent(NULL, chunk->size, chunk->data,
			chunk->phys);
	kfree(chunk->bitmap);
	kfree(chunk);
}

/**
 * kgsl_ptpool_free
 * @pool: A pointer to a ptpool structure
 * @addr: A pointer to the virtual address to free
 *
 * Free a pagetable allocated from the pool
 */

static void kgsl_ptpool_free(struct kgsl_ptpool *pool, void *addr)
{
	struct kgsl_ptpool_chunk *chunk, *tmp;

	if (pool == NULL || addr == NULL)
		return;

	mutex_lock(&pool->lock);
	list_for_each_entry_safe(chunk, tmp, &pool->list, list) {
		if (addr >= chunk->data &&
		    addr < chunk->data + chunk->size) {
			int bit = ((unsigned long) (addr - chunk->data)) /
				pool->ptsize;

			clear_bit(bit, chunk->bitmap);
			memset(addr, 0, pool->ptsize);

			if (chunk->dynamic &&
				bitmap_empty(chunk->bitmap, chunk->count))
				_kgsl_ptpool_rm_chunk(chunk);

			break;
		}
	}

	mutex_unlock(&pool->lock);
}

void kgsl_gpummu_ptpool_destroy(void *ptpool)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)ptpool;
	struct kgsl_ptpool_chunk *chunk, *tmp;

	if (pool == NULL)
		return;

	mutex_lock(&pool->lock);
	list_for_each_entry_safe(chunk, tmp, &pool->list, list)
		_kgsl_ptpool_rm_chunk(chunk);
	mutex_unlock(&pool->lock);

	kfree(pool);
}

/**
 * kgsl_gpummu_ptpool_init
 * @ptsize: The size of each pagetable in the pool
 * @entries: The number of initial entries to add to the pool
 *
 * Initialize a pool and allocate an initial chunk of entries.
 */
void *kgsl_gpummu_ptpool_init(int ptsize, int entries)
{
	struct kgsl_ptpool *pool;
	int ret = 0;
	BUG_ON(ptsize == 0);

	pool = kzalloc(sizeof(struct kgsl_ptpool), GFP_KERNEL);
	if (!pool) {
		KGSL_CORE_ERR("Failed to allocate memory for ptpool\n");
		return NULL;
	}

	pool->ptsize = ptsize;
	mutex_init(&pool->lock);
	INIT_LIST_HEAD(&pool->list);

	if (entries) {
		ret = kgsl_ptpool_add(pool, entries);
		if (ret)
			goto err_ptpool_remove;
	}

	ret = sysfs_create_group(kgsl_driver.ptkobj, &ptpool_attr_group);
	if (ret) {
		KGSL_CORE_ERR("sysfs_create_group failed for ptpool "
			      "statistics: %d\n", ret);
		goto err_ptpool_remove;
	}
	return (void *)pool;

err_ptpool_remove:
	kgsl_gpummu_ptpool_destroy(pool);
	return NULL;
}

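/*
 * Compare a hardware pagetable base address against this pagetable's
 * GPU address; used by the generic MMU layer to identify which
 * pagetable is currently programmed into the hardware.
 */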
int kgsl_gpummu_pt_equal(struct kgsl_pagetable *pt,
			 unsigned int pt_base)
{
	struct kgsl_gpummu_pt *gpummu_pt;

	if (pt == NULL || pt_base == 0)
		return 0;

	gpummu_pt = pt->priv;
	return gpummu_pt->base.gpuaddr == pt_base;
}

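/*
 * Return a pagetable's backing memory to the pool and free the TLB
 * flush filter and the pagetable structure itself.
 */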
void kgsl_gpummu_destroy_pagetable(void *mmu_specific_pt)
{
	struct kgsl_gpummu_pt *gpummu_pt = (struct kgsl_gpummu_pt *)
						mmu_specific_pt;
	kgsl_ptpool_free((struct kgsl_ptpool *)kgsl_driver.ptpool,
				gpummu_pt->base.hostptr);

	kgsl_driver.stats.coherent -= KGSL_PAGETABLE_SIZE;

	kfree(gpummu_pt->tlbflushfilter.base);

	kfree(gpummu_pt);
}

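/*
 * Small pagetable accessors: convert a GPU virtual address into a PTE
 * index relative to the pagetable base, and read or write individual
 * PTEs in the pagetable memory.
 */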
static inline uint32_t
kgsl_pt_entry_get(unsigned int va_base, uint32_t va)
{
	return (va - va_base) >> PAGE_SHIFT;
}

static inline void
kgsl_pt_map_set(struct kgsl_gpummu_pt *pt, uint32_t pte, uint32_t val)
{
	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;

	writel_relaxed(val, &baseptr[pte]);
}

static inline uint32_t
kgsl_pt_map_get(struct kgsl_gpummu_pt *pt, uint32_t pte)
{
	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
	return readl_relaxed(&baseptr[pte]) & GSL_PT_PAGE_ADDR_MASK;
}

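/*
 * Test and clear the pending TLB flush flag for the given device,
 * returning KGSL_MMUFLAGS_TLBFLUSH when a flush is required.
 */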
static unsigned int kgsl_gpummu_pt_get_flags(struct kgsl_pagetable *pt,
					     enum kgsl_deviceid id)
{
	unsigned int result = 0;
	struct kgsl_gpummu_pt *gpummu_pt;

	if (pt == NULL)
		return 0;

	gpummu_pt = (struct kgsl_gpummu_pt *) pt->priv;

	spin_lock(&pt->lock);
	if (gpummu_pt->tlb_flags & (1 << id)) {
		result = KGSL_MMUFLAGS_TLBFLUSH;
		gpummu_pt->tlb_flags &= ~(1 << id);
	}
	spin_unlock(&pt->lock);
	return result;
}

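/*
 * MMU page fault handler: read the faulting address and the active
 * pagetable base from the hardware and log the fault details.
 */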
static void kgsl_gpummu_pagefault(struct kgsl_device *device)
{
	unsigned int reg;
	unsigned int ptbase;

	kgsl_regread(device, MH_MMU_PAGE_FAULT, &reg);
	kgsl_regread(device, MH_MMU_PT_BASE, &ptbase);

	KGSL_MEM_CRIT(device,
			"mmu page fault: page=0x%lx pt=%d op=%s axi=%d\n",
			reg & ~(PAGE_SIZE - 1),
			kgsl_mmu_get_ptname_from_ptbase(ptbase),
			reg & 0x02 ? "WRITE" : "READ", (reg >> 4) & 0xF);
}

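/*
 * Allocate a gpummu pagetable: a TLB flush filter bitmap plus the
 * pagetable memory itself, taken from the coherent pagetable pool.
 */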
static void *kgsl_gpummu_create_pagetable(void)
{
	struct kgsl_gpummu_pt *gpummu_pt;

	gpummu_pt = kzalloc(sizeof(struct kgsl_gpummu_pt),
				GFP_KERNEL);
	if (!gpummu_pt)
		return NULL;

	gpummu_pt->tlb_flags = 0;
	gpummu_pt->last_superpte = 0;

	gpummu_pt->tlbflushfilter.size = (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE /
				(PAGE_SIZE * GSL_PT_SUPER_PTE * 8)) + 1;
	gpummu_pt->tlbflushfilter.base = (unsigned int *)
			kzalloc(gpummu_pt->tlbflushfilter.size, GFP_KERNEL);
	if (!gpummu_pt->tlbflushfilter.base) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
			gpummu_pt->tlbflushfilter.size);
		goto err_free_gpummu;
	}
	GSL_TLBFLUSH_FILTER_RESET();

	gpummu_pt->base.hostptr = kgsl_ptpool_alloc((struct kgsl_ptpool *)
						kgsl_driver.ptpool,
						&gpummu_pt->base.physaddr);

	if (gpummu_pt->base.hostptr == NULL)
		goto err_flushfilter;

	/* ptpool allocations are from coherent memory, so update the
	   device statistics accordingly */

	KGSL_STATS_ADD(KGSL_PAGETABLE_SIZE, kgsl_driver.stats.coherent,
		       kgsl_driver.stats.coherent_max);

	gpummu_pt->base.gpuaddr = gpummu_pt->base.physaddr;
	gpummu_pt->base.size = KGSL_PAGETABLE_SIZE;

	return (void *)gpummu_pt;

err_flushfilter:
	kfree(gpummu_pt->tlbflushfilter.base);
err_free_gpummu:
	kfree(gpummu_pt);

	return NULL;
}

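/*
 * Apply MMU state changes directly on the hardware: update the
 * pagetable base register and/or invalidate the TLB and TC, as
 * requested by 'flags'.
 */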
static void kgsl_gpummu_default_setstate(struct kgsl_device *device,
					 uint32_t flags)
{
	struct kgsl_gpummu_pt *gpummu_pt;
	if (!kgsl_mmu_enabled())
		return;

	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
		kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);
		gpummu_pt = device->mmu.hwpagetable->priv;
		kgsl_regwrite(device, MH_MMU_PT_BASE,
			      gpummu_pt->base.gpuaddr);
	}

	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
		/* Invalidate all and tc */
		kgsl_regwrite(device, MH_MMU_INVALIDATE, 0x00000003);
	}
}

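/*
 * Switch the active hardware pagetable. If the requested pagetable is
 * not the current one, clear its pending flush flag for this device
 * and program the new pagetable base along with a TLB flush.
 */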
static void kgsl_gpummu_setstate(struct kgsl_device *device,
				 struct kgsl_pagetable *pagetable)
{
	struct kgsl_mmu *mmu = &device->mmu;
	struct kgsl_gpummu_pt *gpummu_pt;

	if (mmu->flags & KGSL_FLAGS_STARTED) {
		/* If the page table is not current, set up the mmu to
		 * use the newly specified page table
		 */
		if (mmu->hwpagetable != pagetable) {
			mmu->hwpagetable = pagetable;
			spin_lock(&mmu->hwpagetable->lock);
			gpummu_pt = mmu->hwpagetable->priv;
			gpummu_pt->tlb_flags &= ~(1<<device->id);
			spin_unlock(&mmu->hwpagetable->lock);

			/* call device specific set page table */
			kgsl_setstate(mmu->device, KGSL_MMUFLAGS_TLBFLUSH |
				KGSL_MMUFLAGS_PTUPDATE);
		}
	}
}

static int kgsl_gpummu_init(struct kgsl_device *device)
{
	/*
	 * initialize device mmu
	 *
	 * call this with the global lock held
	 */
	int status = 0;
	struct kgsl_mmu *mmu = &device->mmu;

	mmu->device = device;

	/* sub-client MMU lookups require address translation */
	if ((mmu->config & ~0x1) > 0) {
		/* make sure virtual address range is a multiple of 64KB */
		if (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE & ((1 << 16) - 1)) {
			KGSL_CORE_ERR("Invalid pagetable size requested "
				"for GPUMMU: %x\n",
				CONFIG_MSM_KGSL_PAGE_TABLE_SIZE);
			return -EINVAL;
		}

		/* allocate memory used for completing r/w operations that
		 * cannot be mapped by the MMU
		 */
		status = kgsl_allocate_contiguous(&mmu->setstate_memory, 64);
		if (!status)
			kgsl_sharedmem_set(&mmu->setstate_memory, 0, 0,
					   mmu->setstate_memory.size);
	}

	dev_info(device->dev, "|%s| MMU type set for device is GPUMMU\n",
		__func__);
	return status;
}

static int kgsl_gpummu_start(struct kgsl_device *device)
{
	/*
	 * initialize device mmu
	 *
	 * call this with the global lock held
	 */

	struct kgsl_mmu *mmu = &device->mmu;
	struct kgsl_gpummu_pt *gpummu_pt;

	if (mmu->flags & KGSL_FLAGS_STARTED)
		return 0;

	/* MMU not enabled */
	if ((mmu->config & 0x1) == 0)
		return 0;

	/* setup MMU and sub-client behavior */
	kgsl_regwrite(device, MH_MMU_CONFIG, mmu->config);

	/* idle device */
	kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);

	/* enable axi interrupts */
	kgsl_regwrite(device, MH_INTERRUPT_MASK,
			GSL_MMU_INT_MASK | MH_INTERRUPT_MASK__MMU_PAGE_FAULT);

	kgsl_sharedmem_set(&mmu->setstate_memory, 0, 0,
			   mmu->setstate_memory.size);

	/* TRAN_ERROR needs a 32 byte (32 byte aligned) chunk of memory
	 * to complete transactions in case of an MMU fault. Note that
	 * we'll leave the bottom 32 bytes of the setstate_memory for other
	 * purposes (e.g. use it when dummy read cycles are needed
	 * for other blocks) */
	kgsl_regwrite(device, MH_MMU_TRAN_ERROR,
		mmu->setstate_memory.physaddr + 32);

	if (mmu->defaultpagetable == NULL)
		mmu->defaultpagetable =
			kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);

	/* Return error if the default pagetable doesn't exist */
	if (mmu->defaultpagetable == NULL)
		return -ENOMEM;

	mmu->hwpagetable = mmu->defaultpagetable;
	gpummu_pt = mmu->hwpagetable->priv;
	kgsl_regwrite(device, MH_MMU_PT_BASE,
		      gpummu_pt->base.gpuaddr);
	kgsl_regwrite(device, MH_MMU_VA_RANGE,
		      (KGSL_PAGETABLE_BASE |
		      (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE >> 16)));
	kgsl_setstate(device, KGSL_MMUFLAGS_TLBFLUSH);
	mmu->flags |= KGSL_FLAGS_STARTED;

	return 0;
}

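/*
 * Unmap a memory region: mark each PTE in the range dirty (unmapped)
 * and record the affected superpte ranges in the TLB flush filter so a
 * later map can decide whether a TLB flush is needed.
 */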
static int
kgsl_gpummu_unmap(void *mmu_specific_pt,
		struct kgsl_memdesc *memdesc)
{
	unsigned int numpages;
	unsigned int pte, ptefirst, ptelast, superpte;
	unsigned int range = memdesc->size;
	struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt;

	/* All GPU addresses as assigned are page aligned, but some
	   functions perturb the gpuaddr with an offset, so apply the
	   mask here to make sure we have the right address */

	unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;

	numpages = (range >> PAGE_SHIFT);
	if (range & (PAGE_SIZE - 1))
		numpages++;

	ptefirst = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, gpuaddr);
	ptelast = ptefirst + numpages;

	superpte = ptefirst - (ptefirst & (GSL_PT_SUPER_PTE-1));
	GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / GSL_PT_SUPER_PTE);
	for (pte = ptefirst; pte < ptelast; pte++) {
#ifdef VERBOSE_DEBUG
		/* check if PTE exists */
		if (!kgsl_pt_map_get(gpummu_pt, pte))
			KGSL_CORE_ERR("pt entry %x is already "
				"unmapped for pagetable %p\n", pte, gpummu_pt);
#endif
		kgsl_pt_map_set(gpummu_pt, pte, GSL_PT_PAGE_DIRTY);
		superpte = pte - (pte & (GSL_PT_SUPER_PTE - 1));
		if (pte == superpte)
			GSL_TLBFLUSH_FILTER_SETDIRTY(superpte /
				GSL_PT_SUPER_PTE);
	}

	/* Post all writes to the pagetable */
	wmb();

	return 0;
}

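/*
 * Map a memory region: write a PTE (physical address plus protection
 * flags) for each page of the memdesc. A TLB flush is requested when
 * the mapping does not start and end on superpte boundaries or when it
 * reuses a superpte range marked dirty by a previous unmap.
 */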
static int
kgsl_gpummu_map(void *mmu_specific_pt,
		struct kgsl_memdesc *memdesc,
		unsigned int protflags)
{
	int numpages;
	unsigned int pte, ptefirst, ptelast, physaddr;
	int flushtlb;
	unsigned int offset = 0;
	struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt;

	if (!protflags ||
	    protflags & ~(GSL_PT_PAGE_RV | GSL_PT_PAGE_WV)) {
		KGSL_CORE_ERR("Invalid protflags for "
			"kgsl_gpummu_map: %x\n", protflags);
		return -EINVAL;
	}

	numpages = (memdesc->size >> PAGE_SHIFT);

	ptefirst = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, memdesc->gpuaddr);
	ptelast = ptefirst + numpages;

	pte = ptefirst;
	flushtlb = 0;

	/* tlb needs to be flushed when the first and last pte are not at
	 * superpte boundaries */
	if ((ptefirst & (GSL_PT_SUPER_PTE - 1)) != 0 ||
	    ((ptelast + 1) & (GSL_PT_SUPER_PTE-1)) != 0)
		flushtlb = 1;

	for (pte = ptefirst; pte < ptelast; pte++, offset += PAGE_SIZE) {
#ifdef VERBOSE_DEBUG
		/* check if PTE exists */
		uint32_t val = kgsl_pt_map_get(gpummu_pt, pte);
		if (val != 0 && val != GSL_PT_PAGE_DIRTY) {
			KGSL_CORE_ERR("pt entry %x is already set with "
				"value %x for pagetable %p\n",
				pte, val, gpummu_pt);
			return -EINVAL;
		}
#endif
		if ((pte & (GSL_PT_SUPER_PTE-1)) == 0)
			if (GSL_TLBFLUSH_FILTER_ISDIRTY(pte / GSL_PT_SUPER_PTE))
				flushtlb = 1;
		/* mark pte as in use */

		physaddr = memdesc->ops->physaddr(memdesc, offset);
		if (!physaddr) {
			KGSL_CORE_ERR("Failed to convert %x address to "
				"physical\n",
				(unsigned int)memdesc->hostptr + offset);
			kgsl_gpummu_unmap(mmu_specific_pt, memdesc);
			return -EFAULT;
		}
		kgsl_pt_map_set(gpummu_pt, pte, physaddr | protflags);
	}

	/* Post all writes to the pagetable */
	wmb();

	/* Invalidate the tlb only when the current page table used by the
	 * GPU is the page table that we just updated */
	if (flushtlb) {
		/* set all devices as needing flushing */
		gpummu_pt->tlb_flags = UINT_MAX;
		GSL_TLBFLUSH_FILTER_RESET();
	}

	return 0;
}

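/*
 * Stop the MMU: clear the hardware config register and mark the MMU
 * as no longer started.
 */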
static int kgsl_gpummu_stop(struct kgsl_device *device)
{
	struct kgsl_mmu *mmu = &device->mmu;

	kgsl_regwrite(device, MH_MMU_CONFIG, 0x00000000);
	mmu->flags &= ~KGSL_FLAGS_STARTED;

	return 0;
}

static int kgsl_gpummu_close(struct kgsl_device *device)
{
	/*
	 * close device mmu
	 *
	 * call this with the global lock held
	 */
	struct kgsl_mmu *mmu = &device->mmu;

	if (mmu->setstate_memory.gpuaddr)
		kgsl_sharedmem_free(&mmu->setstate_memory);

	if (mmu->defaultpagetable)
		kgsl_mmu_putpagetable(mmu->defaultpagetable);

	return 0;
}

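/*
 * Read the pagetable base that is currently programmed into the
 * hardware.
 */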
static unsigned int
kgsl_gpummu_get_current_ptbase(struct kgsl_device *device)
{
	unsigned int ptbase;
	kgsl_regread(device, MH_MMU_PT_BASE, &ptbase);
	return ptbase;
}

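/*
 * Operation tables plugged into the generic KGSL MMU layer when the
 * GPUMMU is in use.
 */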
struct kgsl_mmu_ops gpummu_ops = {
	.mmu_init = kgsl_gpummu_init,
	.mmu_close = kgsl_gpummu_close,
	.mmu_start = kgsl_gpummu_start,
	.mmu_stop = kgsl_gpummu_stop,
	.mmu_setstate = kgsl_gpummu_setstate,
	.mmu_device_setstate = kgsl_gpummu_default_setstate,
	.mmu_pagefault = kgsl_gpummu_pagefault,
	.mmu_get_current_ptbase = kgsl_gpummu_get_current_ptbase,
};

struct kgsl_mmu_pt_ops gpummu_pt_ops = {
	.mmu_map = kgsl_gpummu_map,
	.mmu_unmap = kgsl_gpummu_unmap,
	.mmu_create_pagetable = kgsl_gpummu_create_pagetable,
	.mmu_destroy_pagetable = kgsl_gpummu_destroy_pagetable,
	.mmu_pt_equal = kgsl_gpummu_pt_equal,
	.mmu_pt_get_flags = kgsl_gpummu_pt_get_flags,
};