/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "kgsl.h"
#include "kgsl_mmu.h"
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"

#define KGSL_PAGETABLE_SIZE \
	ALIGN(KGSL_PAGETABLE_ENTRIES(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE) * \
	KGSL_PAGETABLE_ENTRY_SIZE, PAGE_SIZE)
static ssize_t
sysfs_show_ptpool_entries(struct kobject *kobj,
			  struct kobj_attribute *attr,
			  char *buf)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
					kgsl_driver.ptpool;
	return snprintf(buf, PAGE_SIZE, "%d\n", pool->entries);
}

static ssize_t
sysfs_show_ptpool_min(struct kobject *kobj,
		      struct kobj_attribute *attr,
		      char *buf)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
					kgsl_driver.ptpool;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			pool->static_entries);
}

static ssize_t
sysfs_show_ptpool_chunks(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 char *buf)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
					kgsl_driver.ptpool;
	return snprintf(buf, PAGE_SIZE, "%d\n", pool->chunks);
}

static ssize_t
sysfs_show_ptpool_ptsize(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 char *buf)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
					kgsl_driver.ptpool;
	return snprintf(buf, PAGE_SIZE, "%d\n", pool->ptsize);
}

static struct kobj_attribute attr_ptpool_entries = {
	.attr = { .name = "ptpool_entries", .mode = 0444 },
	.show = sysfs_show_ptpool_entries,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_min = {
	.attr = { .name = "ptpool_min", .mode = 0444 },
	.show = sysfs_show_ptpool_min,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_chunks = {
	.attr = { .name = "ptpool_chunks", .mode = 0444 },
	.show = sysfs_show_ptpool_chunks,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_ptsize = {
	.attr = { .name = "ptpool_ptsize", .mode = 0444 },
	.show = sysfs_show_ptpool_ptsize,
	.store = NULL,
};

static struct attribute *ptpool_attrs[] = {
	&attr_ptpool_entries.attr,
	&attr_ptpool_min.attr,
	&attr_ptpool_chunks.attr,
	&attr_ptpool_ptsize.attr,
	NULL,
};

static struct attribute_group ptpool_attr_group = {
	.attrs = ptpool_attrs,
};

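/*
 * _kgsl_ptpool_add_entries - add a chunk of pagetable memory to the pool
 * @pool: the pool to grow
 * @count: number of pagetable entries the new chunk should hold
 * @dynamic: non-zero if the chunk may be freed again once it becomes empty
 *
 * Allocates one contiguous chunk of coherent memory large enough for
 * @count pagetables plus a bitmap that tracks which slots are in use,
 * and appends it to the pool's chunk list. Called with pool->lock held.
 */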
static int
_kgsl_ptpool_add_entries(struct kgsl_ptpool *pool, int count, int dynamic)
{
	struct kgsl_ptpool_chunk *chunk;
	size_t size = ALIGN(count * pool->ptsize, PAGE_SIZE);

	BUG_ON(count == 0);

	if (get_order(size) >= MAX_ORDER) {
		KGSL_CORE_ERR("ptpool allocation is too big: %zu\n", size);
		return -EINVAL;
	}

	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (chunk == NULL) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n", sizeof(*chunk));
		return -ENOMEM;
	}

	chunk->size = size;
	chunk->count = count;
	chunk->dynamic = dynamic;

	chunk->data = dma_alloc_coherent(NULL, size,
					 &chunk->phys, GFP_KERNEL);

	if (chunk->data == NULL) {
		KGSL_CORE_ERR("dma_alloc_coherent(%zu) failed\n", size);
		goto err;
	}

	chunk->bitmap = kzalloc(BITS_TO_LONGS(count) * sizeof(long),
				GFP_KERNEL);

	if (chunk->bitmap == NULL) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n",
			BITS_TO_LONGS(count) * sizeof(long));
		goto err_dma;
	}

	list_add_tail(&chunk->list, &pool->list);

	pool->chunks++;
	pool->entries += count;

	if (!dynamic)
		pool->static_entries += count;

	return 0;

err_dma:
	dma_free_coherent(NULL, chunk->size, chunk->data, chunk->phys);
err:
	kfree(chunk);
	return -ENOMEM;
}

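/*
 * _kgsl_ptpool_get_entry - grab a free pagetable slot from the pool
 * @pool: the pool to search
 * @physaddr: out parameter for the physical address of the slot
 *
 * Walks the chunk list looking for the first free bit in any bitmap,
 * marks it busy and returns the kernel virtual address of the slot,
 * or NULL if every chunk is full. Called with pool->lock held.
 */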
static void *
_kgsl_ptpool_get_entry(struct kgsl_ptpool *pool, unsigned int *physaddr)
{
	struct kgsl_ptpool_chunk *chunk;

	list_for_each_entry(chunk, &pool->list, list) {
		int bit = find_first_zero_bit(chunk->bitmap, chunk->count);

		if (bit >= chunk->count)
			continue;

		set_bit(bit, chunk->bitmap);
		*physaddr = chunk->phys + (bit * pool->ptsize);

		return chunk->data + (bit * pool->ptsize);
	}

	return NULL;
}

/**
 * kgsl_ptpool_add
 * @pool: A pointer to a ptpool structure
 * @count: Number of entries to add
 *
 * Add static entries to the pagetable pool.
 */

static int
kgsl_ptpool_add(struct kgsl_ptpool *pool, int count)
{
	int ret = 0;
	BUG_ON(count == 0);

	mutex_lock(&pool->lock);

	/* Only 4MB can be allocated in one chunk, so larger allocations
	   need to be split into multiple sections */

	while (count) {
		int entries = ((count * pool->ptsize) > SZ_4M) ?
			SZ_4M / pool->ptsize : count;

		/* Add the entries as static, i.e. they don't ever stand
		   a chance of being removed */

		ret = _kgsl_ptpool_add_entries(pool, entries, 0);
		if (ret)
			break;

		count -= entries;
	}

	mutex_unlock(&pool->lock);
	return ret;
}

/**
 * kgsl_ptpool_alloc
 * @pool: A pointer to a ptpool structure
 * @physaddr: A pointer to store the physical address of the pagetable
 *
 * Allocate a pagetable from the pool. Returns the virtual address
 * of the pagetable; the physical address is returned in physaddr.
 */

static void *kgsl_ptpool_alloc(struct kgsl_ptpool *pool,
			       unsigned int *physaddr)
{
	void *addr = NULL;
	int ret;

	mutex_lock(&pool->lock);
	addr = _kgsl_ptpool_get_entry(pool, physaddr);
	if (addr)
		goto done;

	/* Add a chunk for 1 more pagetable and mark it as dynamic */
	ret = _kgsl_ptpool_add_entries(pool, 1, 1);

	if (ret)
		goto done;

	addr = _kgsl_ptpool_get_entry(pool, physaddr);
done:
	mutex_unlock(&pool->lock);
	return addr;
}

static inline void _kgsl_ptpool_rm_chunk(struct kgsl_ptpool_chunk *chunk)
{
	list_del(&chunk->list);

	if (chunk->data)
		dma_free_coherent(NULL, chunk->size, chunk->data,
			chunk->phys);
	kfree(chunk->bitmap);
	kfree(chunk);
}

/**
 * kgsl_ptpool_free
 * @pool: A pointer to a ptpool structure
 * @addr: A pointer to the virtual address to free
 *
 * Free a pagetable allocated from the pool.
 */

static void kgsl_ptpool_free(struct kgsl_ptpool *pool, void *addr)
{
	struct kgsl_ptpool_chunk *chunk, *tmp;

	if (pool == NULL || addr == NULL)
		return;

	mutex_lock(&pool->lock);
	list_for_each_entry_safe(chunk, tmp, &pool->list, list) {
		if (addr >= chunk->data &&
		    addr < chunk->data + chunk->size) {
			int bit = ((unsigned long) (addr - chunk->data)) /
				pool->ptsize;

			clear_bit(bit, chunk->bitmap);
			memset(addr, 0, pool->ptsize);

			if (chunk->dynamic &&
				bitmap_empty(chunk->bitmap, chunk->count))
				_kgsl_ptpool_rm_chunk(chunk);

			break;
		}
	}

	mutex_unlock(&pool->lock);
}

void kgsl_gpummu_ptpool_destroy(void *ptpool)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)ptpool;
	struct kgsl_ptpool_chunk *chunk, *tmp;

	if (pool == NULL)
		return;

	mutex_lock(&pool->lock);
	list_for_each_entry_safe(chunk, tmp, &pool->list, list)
		_kgsl_ptpool_rm_chunk(chunk);
	mutex_unlock(&pool->lock);

	kfree(pool);
}

/**
 * kgsl_gpummu_ptpool_init
 * @entries: The number of initial entries to add to the pool
 *
 * Initialize a pool and allocate an initial chunk of entries.
 */
void *kgsl_gpummu_ptpool_init(int entries)
{
	int ptsize = KGSL_PAGETABLE_SIZE;
	struct kgsl_ptpool *pool;
	int ret = 0;

	pool = kzalloc(sizeof(struct kgsl_ptpool), GFP_KERNEL);
	if (!pool) {
		KGSL_CORE_ERR("Failed to allocate memory "
				"for ptpool\n");
		return NULL;
	}

	pool->ptsize = ptsize;
	mutex_init(&pool->lock);
	INIT_LIST_HEAD(&pool->list);

	if (entries) {
		ret = kgsl_ptpool_add(pool, entries);
		if (ret)
			goto err_ptpool_remove;
	}

	ret = sysfs_create_group(kgsl_driver.ptkobj, &ptpool_attr_group);
	if (ret) {
		KGSL_CORE_ERR("sysfs_create_group failed for ptpool "
				"statistics: %d\n", ret);
		goto err_ptpool_remove;
	}
	return (void *)pool;

err_ptpool_remove:
	kgsl_gpummu_ptpool_destroy(pool);
	return NULL;
}

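/*
 * kgsl_gpummu_pt_equal - check a pagetable against a hardware base address
 * @pt: generic pagetable to compare
 * @pt_base: pagetable base address read back from the MMU
 *
 * Returns non-zero if @pt is backed by GPUMMU state whose pagetable base
 * matches @pt_base.
 */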
int kgsl_gpummu_pt_equal(struct kgsl_pagetable *pt,
			unsigned int pt_base)
{
	struct kgsl_gpummu_pt *gpummu_pt = pt ? pt->priv : NULL;
	return gpummu_pt && pt_base && (gpummu_pt->base.gpuaddr == pt_base);
}

void kgsl_gpummu_destroy_pagetable(void *mmu_specific_pt)
{
	struct kgsl_gpummu_pt *gpummu_pt = (struct kgsl_gpummu_pt *)
						mmu_specific_pt;
	kgsl_ptpool_free((struct kgsl_ptpool *)kgsl_driver.ptpool,
				gpummu_pt->base.hostptr);

	kgsl_driver.stats.coherent -= KGSL_PAGETABLE_SIZE;

	kfree(gpummu_pt->tlbflushfilter.base);

	kfree(gpummu_pt);
}

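/*
 * Low level pagetable accessors: kgsl_pt_entry_get() converts a GPU
 * virtual address into a PTE index relative to the pagetable base, and
 * kgsl_pt_map_set()/kgsl_pt_map_get() write and read individual entries
 * (the read masks off the status bits and returns only the page address).
 */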
static inline uint32_t
kgsl_pt_entry_get(unsigned int va_base, uint32_t va)
{
	return (va - va_base) >> PAGE_SHIFT;
}

static inline void
kgsl_pt_map_set(struct kgsl_gpummu_pt *pt, uint32_t pte, uint32_t val)
{
	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
	BUG_ON(pte * sizeof(uint32_t) >= pt->base.size);
	baseptr[pte] = val;
}

static inline uint32_t
kgsl_pt_map_get(struct kgsl_gpummu_pt *pt, uint32_t pte)
{
	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
	BUG_ON(pte * sizeof(uint32_t) >= pt->base.size);
	return baseptr[pte] & GSL_PT_PAGE_ADDR_MASK;
}

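/*
 * kgsl_gpummu_pagefault - log an MMU page fault
 *
 * Reads the faulting address and the active pagetable base from the MH
 * registers and prints the faulting page, pagetable name, access type
 * and AXI id.
 */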
static void kgsl_gpummu_pagefault(struct kgsl_device *device)
{
	unsigned int reg;
	unsigned int ptbase;

	kgsl_regread(device, MH_MMU_PAGE_FAULT, &reg);
	kgsl_regread(device, MH_MMU_PT_BASE, &ptbase);

	KGSL_MEM_CRIT(device,
			"mmu page fault: page=0x%lx pt=%d op=%s axi=%d\n",
			reg & ~(PAGE_SIZE - 1),
			kgsl_mmu_get_ptname_from_ptbase(ptbase),
			reg & 0x02 ? "WRITE" : "READ", (reg >> 4) & 0xF);
}

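/*
 * kgsl_gpummu_create_pagetable - allocate GPUMMU private pagetable state
 *
 * Allocates the kgsl_gpummu_pt structure, its TLB flush filter bitmap
 * and the pagetable memory itself from the global ptpool. Returns NULL
 * on any allocation failure.
 */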
static void *kgsl_gpummu_create_pagetable(void)
{
	struct kgsl_gpummu_pt *gpummu_pt;

	gpummu_pt = kzalloc(sizeof(struct kgsl_gpummu_pt),
				GFP_KERNEL);
	if (!gpummu_pt)
		return NULL;

	gpummu_pt->last_superpte = 0;

	gpummu_pt->tlbflushfilter.size = (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE /
				(PAGE_SIZE * GSL_PT_SUPER_PTE * 8)) + 1;
	gpummu_pt->tlbflushfilter.base = (unsigned int *)
			kzalloc(gpummu_pt->tlbflushfilter.size, GFP_KERNEL);
	if (!gpummu_pt->tlbflushfilter.base) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
			gpummu_pt->tlbflushfilter.size);
		goto err_free_gpummu;
	}
	GSL_TLBFLUSH_FILTER_RESET();

	gpummu_pt->base.hostptr = kgsl_ptpool_alloc((struct kgsl_ptpool *)
						kgsl_driver.ptpool,
						&gpummu_pt->base.physaddr);

	if (gpummu_pt->base.hostptr == NULL)
		goto err_flushfilter;

	/* ptpool allocations are from coherent memory, so update the
	   device statistics accordingly */

	KGSL_STATS_ADD(KGSL_PAGETABLE_SIZE, kgsl_driver.stats.coherent,
		       kgsl_driver.stats.coherent_max);

	gpummu_pt->base.gpuaddr = gpummu_pt->base.physaddr;
	gpummu_pt->base.size = KGSL_PAGETABLE_SIZE;

	return (void *)gpummu_pt;

err_flushfilter:
	kfree(gpummu_pt->tlbflushfilter.base);
err_free_gpummu:
	kfree(gpummu_pt);

	return NULL;
}

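/*
 * kgsl_gpummu_default_setstate - apply MMU state changes on the device
 *
 * Depending on @flags, idles the GPU and reprograms the pagetable base
 * register (KGSL_MMUFLAGS_PTUPDATE) and/or invalidates the TLB and TC
 * (KGSL_MMUFLAGS_TLBFLUSH).
 */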
static void kgsl_gpummu_default_setstate(struct kgsl_device *device,
					uint32_t flags)
{
	struct kgsl_gpummu_pt *gpummu_pt;
	if (!kgsl_mmu_enabled())
		return;

	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
		kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);
		gpummu_pt = device->mmu.hwpagetable->priv;
		kgsl_regwrite(device, MH_MMU_PT_BASE,
			gpummu_pt->base.gpuaddr);
	}

	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
		/* Invalidate all and tc */
		kgsl_regwrite(device, MH_MMU_INVALIDATE, 0x00000003);
	}
}

static void kgsl_gpummu_setstate(struct kgsl_device *device,
				struct kgsl_pagetable *pagetable)
{
	struct kgsl_mmu *mmu = &device->mmu;

	if (mmu->flags & KGSL_FLAGS_STARTED) {
		/* If the page table is not current, set up the mmu to
		 * use the newly specified page table
		 */
		if (mmu->hwpagetable != pagetable) {
			mmu->hwpagetable = pagetable;
			/* Since we do a TLB flush the tlb_flags should
			 * be cleared by calling kgsl_mmu_pt_get_flags
			 */
			kgsl_mmu_pt_get_flags(pagetable, mmu->device->id);

			/* call device specific set page table */
			kgsl_setstate(mmu->device, KGSL_MMUFLAGS_TLBFLUSH |
				KGSL_MMUFLAGS_PTUPDATE);
		}
	}
}

static int kgsl_gpummu_init(struct kgsl_device *device)
{
	/*
	 * initialize device mmu
	 *
	 * call this with the global lock held
	 */
	int status = 0;
	struct kgsl_mmu *mmu = &device->mmu;

	mmu->device = device;

	/* sub-client MMU lookups require address translation */
	if ((mmu->config & ~0x1) > 0) {
		/* make sure virtual address range is a multiple of 64KB */
		if (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE & ((1 << 16) - 1)) {
			KGSL_CORE_ERR("Invalid pagetable size requested "
			"for GPUMMU: %x\n", CONFIG_MSM_KGSL_PAGE_TABLE_SIZE);
			return -EINVAL;
		}

		/* allocate memory used for completing r/w operations that
		 * cannot be mapped by the MMU
		 */
		status = kgsl_allocate_contiguous(&mmu->setstate_memory, 64);
		if (!status)
			kgsl_sharedmem_set(&mmu->setstate_memory, 0, 0,
					   mmu->setstate_memory.size);
	}

	dev_info(device->dev, "|%s| MMU type set for device is GPUMMU\n",
			__func__);
	return status;
}

static int kgsl_gpummu_start(struct kgsl_device *device)
{
	/*
	 * initialize device mmu
	 *
	 * call this with the global lock held
	 */

	struct kgsl_mmu *mmu = &device->mmu;
	struct kgsl_gpummu_pt *gpummu_pt;

	if (mmu->flags & KGSL_FLAGS_STARTED)
		return 0;

	/* MMU not enabled */
	if ((mmu->config & 0x1) == 0)
		return 0;

	/* setup MMU and sub-client behavior */
	kgsl_regwrite(device, MH_MMU_CONFIG, mmu->config);

	/* idle device */
	kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);

	/* enable axi interrupts */
	kgsl_regwrite(device, MH_INTERRUPT_MASK,
			GSL_MMU_INT_MASK | MH_INTERRUPT_MASK__MMU_PAGE_FAULT);

	kgsl_sharedmem_set(&mmu->setstate_memory, 0, 0,
			   mmu->setstate_memory.size);

	/* TRAN_ERROR needs a 32 byte (32 byte aligned) chunk of memory
	 * to complete transactions in case of an MMU fault. Note that
	 * we'll leave the bottom 32 bytes of the setstate_memory for other
	 * purposes (e.g. use it when dummy read cycles are needed
	 * for other blocks) */
	kgsl_regwrite(device, MH_MMU_TRAN_ERROR,
			mmu->setstate_memory.physaddr + 32);

	if (mmu->defaultpagetable == NULL)
		mmu->defaultpagetable =
			kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);

	/* Return error if the default pagetable doesn't exist */
	if (mmu->defaultpagetable == NULL)
		return -ENOMEM;

	mmu->hwpagetable = mmu->defaultpagetable;
	gpummu_pt = mmu->hwpagetable->priv;
	kgsl_regwrite(device, MH_MMU_PT_BASE,
			gpummu_pt->base.gpuaddr);
	kgsl_regwrite(device, MH_MMU_VA_RANGE,
			(KGSL_PAGETABLE_BASE |
			(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE >> 16)));
	kgsl_setstate(device, KGSL_MMUFLAGS_TLBFLUSH);
	mmu->flags |= KGSL_FLAGS_STARTED;

	return 0;
}

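/*
 * kgsl_gpummu_unmap - tear down the mapping for a memory descriptor
 *
 * Rewrites every PTE covering the buffer with the GSL_PT_PAGE_DIRTY
 * marker and records the affected superpte blocks in the TLB flush
 * filter so that a later setstate only flushes what changed.
 */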
static int
kgsl_gpummu_unmap(void *mmu_specific_pt,
		struct kgsl_memdesc *memdesc)
{
	unsigned int numpages;
	unsigned int pte, ptefirst, ptelast, superpte;
	unsigned int range = memdesc->size;
	struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt;

	/* All GPU addresses as assigned are page aligned, but some
	   functions perturb the gpuaddr with an offset, so apply the
	   mask here to make sure we have the right address */

	unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;

	numpages = (range >> PAGE_SHIFT);
	if (range & (PAGE_SIZE - 1))
		numpages++;

	ptefirst = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, gpuaddr);
	ptelast = ptefirst + numpages;

	superpte = ptefirst - (ptefirst & (GSL_PT_SUPER_PTE - 1));
	GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / GSL_PT_SUPER_PTE);
	for (pte = ptefirst; pte < ptelast; pte++) {
#ifdef VERBOSE_DEBUG
		/* check if PTE exists */
		if (!kgsl_pt_map_get(gpummu_pt, pte))
			KGSL_CORE_ERR("pt entry %x is already "
				"unmapped for pagetable %p\n", pte, gpummu_pt);
#endif
		kgsl_pt_map_set(gpummu_pt, pte, GSL_PT_PAGE_DIRTY);
		superpte = pte - (pte & (GSL_PT_SUPER_PTE - 1));
		if (pte == superpte)
			GSL_TLBFLUSH_FILTER_SETDIRTY(superpte /
				GSL_PT_SUPER_PTE);
	}

	/* Post all writes to the pagetable */
	wmb();

	return 0;
}

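/*
 * True when @_p is the first PTE of a superpte block and the TLB flush
 * filter already has that block marked dirty.
 */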
#define SUPERPTE_IS_DIRTY(_p) \
(((_p) & (GSL_PT_SUPER_PTE - 1)) == 0 && \
GSL_TLBFLUSH_FILTER_ISDIRTY((_p) / GSL_PT_SUPER_PTE))

static int
kgsl_gpummu_map(void *mmu_specific_pt,
		struct kgsl_memdesc *memdesc,
		unsigned int protflags,
		unsigned int *tlb_flags)
{
	unsigned int pte;
	struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt;
	struct scatterlist *s;
	int flushtlb = 0;
	int i;

	pte = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, memdesc->gpuaddr);

	/* Flush the TLB if the first PTE isn't at the superpte boundary */
	if (pte & (GSL_PT_SUPER_PTE - 1))
		flushtlb = 1;

	for_each_sg(memdesc->sg, s, memdesc->sglen, i) {
		unsigned int paddr = kgsl_get_sg_pa(s);
		unsigned int j;

		/* Each sg entry might be multiple pages long */
		for (j = paddr; j < paddr + s->length; pte++, j += PAGE_SIZE) {
			if (SUPERPTE_IS_DIRTY(pte))
				flushtlb = 1;
			kgsl_pt_map_set(gpummu_pt, pte, j | protflags);
		}
	}

	/* Flush the TLB if the last PTE isn't at the superpte boundary */
	if ((pte + 1) & (GSL_PT_SUPER_PTE - 1))
		flushtlb = 1;

	wmb();

	if (flushtlb) {
		/* set all devices as needing flushing */
		*tlb_flags = UINT_MAX;
		GSL_TLBFLUSH_FILTER_RESET();
	}

	return 0;
}

static int kgsl_gpummu_stop(struct kgsl_device *device)
{
	struct kgsl_mmu *mmu = &device->mmu;

	kgsl_regwrite(device, MH_MMU_CONFIG, 0x00000000);
	mmu->flags &= ~KGSL_FLAGS_STARTED;

	return 0;
}

static int kgsl_gpummu_close(struct kgsl_device *device)
{
	/*
	 * close device mmu
	 *
	 * call this with the global lock held
	 */
	struct kgsl_mmu *mmu = &device->mmu;

	if (mmu->setstate_memory.gpuaddr)
		kgsl_sharedmem_free(&mmu->setstate_memory);

	if (mmu->defaultpagetable)
		kgsl_mmu_putpagetable(mmu->defaultpagetable);

	return 0;
}

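/* Read back the pagetable base physical address currently programmed
 * into the MMU. */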
static unsigned int
kgsl_gpummu_get_current_ptbase(struct kgsl_device *device)
{
	unsigned int ptbase;
	kgsl_regread(device, MH_MMU_PT_BASE, &ptbase);
	return ptbase;
}

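/*
 * Function tables that plug the GPUMMU implementation into the generic
 * kgsl_mmu layer: gpummu_ops covers device level operations, while
 * gpummu_pt_ops covers per-pagetable map/unmap and lifetime management.
 */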
struct kgsl_mmu_ops gpummu_ops = {
	.mmu_init = kgsl_gpummu_init,
	.mmu_close = kgsl_gpummu_close,
	.mmu_start = kgsl_gpummu_start,
	.mmu_stop = kgsl_gpummu_stop,
	.mmu_setstate = kgsl_gpummu_setstate,
	.mmu_device_setstate = kgsl_gpummu_default_setstate,
	.mmu_pagefault = kgsl_gpummu_pagefault,
	.mmu_get_current_ptbase = kgsl_gpummu_get_current_ptbase,
};

struct kgsl_mmu_pt_ops gpummu_pt_ops = {
	.mmu_map = kgsl_gpummu_map,
	.mmu_unmap = kgsl_gpummu_unmap,
	.mmu_create_pagetable = kgsl_gpummu_create_pagetable,
	.mmu_destroy_pagetable = kgsl_gpummu_destroy_pagetable,
	.mmu_pt_equal = kgsl_gpummu_pt_equal,
};