/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "kgsl.h"
#include "kgsl_mmu.h"
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"

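/*
 * Size of one GPU pagetable: one entry per page of the configured virtual
 * range, rounded up to a whole page so pool chunks stay page-aligned.
 * Illustrative arithmetic (example values, not a fixed configuration):
 * a 64MB virtual range with 4K pages needs 16384 entries; with 4-byte
 * entries (the PTEs are uint32_t, see kgsl_pt_map_set() below) that is
 * a 64KB pagetable.
 */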
#define KGSL_PAGETABLE_SIZE \
	ALIGN(KGSL_PAGETABLE_ENTRIES(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE) * \
	KGSL_PAGETABLE_ENTRY_SIZE, PAGE_SIZE)

static ssize_t
sysfs_show_ptpool_entries(struct kobject *kobj,
			  struct kobj_attribute *attr,
			  char *buf)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
		kgsl_driver.ptpool;
	return snprintf(buf, PAGE_SIZE, "%d\n", pool->entries);
}

static ssize_t
sysfs_show_ptpool_min(struct kobject *kobj,
		      struct kobj_attribute *attr,
		      char *buf)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
		kgsl_driver.ptpool;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			pool->static_entries);
}

static ssize_t
sysfs_show_ptpool_chunks(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 char *buf)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
		kgsl_driver.ptpool;
	return snprintf(buf, PAGE_SIZE, "%d\n", pool->chunks);
}

static ssize_t
sysfs_show_ptpool_ptsize(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 char *buf)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
		kgsl_driver.ptpool;
	return snprintf(buf, PAGE_SIZE, "%d\n", pool->ptsize);
}

static struct kobj_attribute attr_ptpool_entries = {
	.attr = { .name = "ptpool_entries", .mode = 0444 },
	.show = sysfs_show_ptpool_entries,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_min = {
	.attr = { .name = "ptpool_min", .mode = 0444 },
	.show = sysfs_show_ptpool_min,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_chunks = {
	.attr = { .name = "ptpool_chunks", .mode = 0444 },
	.show = sysfs_show_ptpool_chunks,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_ptsize = {
	.attr = { .name = "ptpool_ptsize", .mode = 0444 },
	.show = sysfs_show_ptpool_ptsize,
	.store = NULL,
};

static struct attribute *ptpool_attrs[] = {
	&attr_ptpool_entries.attr,
	&attr_ptpool_min.attr,
	&attr_ptpool_chunks.attr,
	&attr_ptpool_ptsize.attr,
	NULL,
};

static struct attribute_group ptpool_attr_group = {
	.attrs = ptpool_attrs,
};
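
/*
 * The group above is registered on kgsl_driver.ptkobj in
 * kgsl_gpummu_ptpool_init() below, so these four read-only pool
 * statistics become visible through sysfs once the pool is created.
 */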

static int
_kgsl_ptpool_add_entries(struct kgsl_ptpool *pool, int count, int dynamic)
{
	struct kgsl_ptpool_chunk *chunk;
	size_t size = ALIGN(count * pool->ptsize, PAGE_SIZE);

	BUG_ON(count == 0);

	if (get_order(size) >= MAX_ORDER) {
		KGSL_CORE_ERR("ptpool allocation is too big: %zu\n", size);
		return -EINVAL;
	}

	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (chunk == NULL) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n", sizeof(*chunk));
		return -ENOMEM;
	}

	chunk->size = size;
	chunk->count = count;
	chunk->dynamic = dynamic;

	chunk->data = dma_alloc_coherent(NULL, size,
					 &chunk->phys, GFP_KERNEL);

	if (chunk->data == NULL) {
		KGSL_CORE_ERR("dma_alloc_coherent(%zu) failed\n", size);
		goto err;
	}

	/* One bit per entry; size the bitmap in longs rather than a
	   hardcoded 4 bytes so it is also correct on 64-bit kernels */
	chunk->bitmap = kzalloc(BITS_TO_LONGS(count) * sizeof(unsigned long),
				GFP_KERNEL);

	if (chunk->bitmap == NULL) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n",
			BITS_TO_LONGS(count) * sizeof(unsigned long));
		goto err_dma;
	}

	list_add_tail(&chunk->list, &pool->list);

	pool->chunks++;
	pool->entries += count;

	if (!dynamic)
		pool->static_entries += count;

	return 0;

err_dma:
	dma_free_coherent(NULL, chunk->size, chunk->data, chunk->phys);
err:
	kfree(chunk);
	return -ENOMEM;
}

static void *
_kgsl_ptpool_get_entry(struct kgsl_ptpool *pool, unsigned int *physaddr)
{
	struct kgsl_ptpool_chunk *chunk;

	/* First-fit scan of the chunk list for a free bitmap slot */
	list_for_each_entry(chunk, &pool->list, list) {
		int bit = find_first_zero_bit(chunk->bitmap, chunk->count);

		if (bit >= chunk->count)
			continue;

		set_bit(bit, chunk->bitmap);
		*physaddr = chunk->phys + (bit * pool->ptsize);

		return chunk->data + (bit * pool->ptsize);
	}

	return NULL;
}

/**
 * kgsl_ptpool_add
 * @pool: A pointer to a ptpool structure
 * @count: Number of static entries to add
 *
 * Add static entries to the pagetable pool.
 */

static int
kgsl_ptpool_add(struct kgsl_ptpool *pool, int count)
{
	int ret = 0;
	BUG_ON(count == 0);

	mutex_lock(&pool->lock);

	/* Only 4MB can be allocated in one chunk, so larger allocations
	   need to be split into multiple sections */

	while (count) {
		int entries = ((count * pool->ptsize) > SZ_4M) ?
			SZ_4M / pool->ptsize : count;

		/* Add the entries as static, i.e. they don't ever stand
		   a chance of being removed */

		ret = _kgsl_ptpool_add_entries(pool, entries, 0);
		if (ret)
			break;

		count -= entries;
	}

	mutex_unlock(&pool->lock);
	return ret;
}
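
/*
 * Illustrative arithmetic for the splitting above (example values, not a
 * fixed configuration): with a 32KB pagetable size, one 4MB chunk holds
 * 128 pagetables, so kgsl_ptpool_add(pool, 300) would allocate chunks of
 * 128, 128 and 44 entries.
 */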

/**
 * kgsl_ptpool_alloc
 * @pool: A pointer to a ptpool structure
 * @physaddr: A pointer to store the physical address of the pagetable
 *
 * Allocate a pagetable from the pool. Returns the virtual address
 * of the pagetable; the physical address is returned in physaddr.
 */

static void *kgsl_ptpool_alloc(struct kgsl_ptpool *pool,
			       unsigned int *physaddr)
{
	void *addr = NULL;
	int ret;

	mutex_lock(&pool->lock);
	addr = _kgsl_ptpool_get_entry(pool, physaddr);
	if (addr)
		goto done;

	/* Add a chunk for 1 more pagetable and mark it as dynamic */
	ret = _kgsl_ptpool_add_entries(pool, 1, 1);

	if (ret)
		goto done;

	addr = _kgsl_ptpool_get_entry(pool, physaddr);
done:
	mutex_unlock(&pool->lock);
	return addr;
}
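
/*
 * Typical pairing, as a sketch (in this file the pool comes from
 * kgsl_driver.ptpool and the result backs gpummu_pt->base):
 *
 *	unsigned int physaddr;
 *	void *hostptr = kgsl_ptpool_alloc(pool, &physaddr);
 *
 *	if (hostptr) {
 *		... program PTEs through hostptr, hand physaddr to the MMU ...
 *		kgsl_ptpool_free(pool, hostptr);
 *	}
 */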

static inline void _kgsl_ptpool_rm_chunk(struct kgsl_ptpool_chunk *chunk)
{
	list_del(&chunk->list);

	if (chunk->data)
		dma_free_coherent(NULL, chunk->size, chunk->data,
			chunk->phys);
	kfree(chunk->bitmap);
	kfree(chunk);
}

/**
 * kgsl_ptpool_free
 * @pool: A pointer to a ptpool structure
 * @addr: The virtual address of the pagetable to free
 *
 * Free a pagetable allocated from the pool
 */

static void kgsl_ptpool_free(struct kgsl_ptpool *pool, void *addr)
{
	struct kgsl_ptpool_chunk *chunk, *tmp;

	if (pool == NULL || addr == NULL)
		return;

	mutex_lock(&pool->lock);
	list_for_each_entry_safe(chunk, tmp, &pool->list, list) {
		if (addr >= chunk->data &&
		    addr < chunk->data + chunk->size) {
			int bit = ((unsigned long) (addr - chunk->data)) /
				pool->ptsize;

			clear_bit(bit, chunk->bitmap);
			memset(addr, 0, pool->ptsize);

			/* Dynamically added chunks are returned to the
			   system once their last entry is released */
			if (chunk->dynamic &&
			    bitmap_empty(chunk->bitmap, chunk->count))
				_kgsl_ptpool_rm_chunk(chunk);

			break;
		}
	}

	mutex_unlock(&pool->lock);
}

void kgsl_gpummu_ptpool_destroy(void *ptpool)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)ptpool;
	struct kgsl_ptpool_chunk *chunk, *tmp;

	if (pool == NULL)
		return;

	mutex_lock(&pool->lock);
	list_for_each_entry_safe(chunk, tmp, &pool->list, list)
		_kgsl_ptpool_rm_chunk(chunk);
	mutex_unlock(&pool->lock);

	kfree(pool);
}

/**
 * kgsl_gpummu_ptpool_init
 * @entries: The number of initial entries to add to the pool
 *
 * Initialize a pool and allocate an initial chunk of entries.
 */
void *kgsl_gpummu_ptpool_init(int entries)
{
	int ptsize = KGSL_PAGETABLE_SIZE;
	struct kgsl_ptpool *pool;
	int ret = 0;

	pool = kzalloc(sizeof(struct kgsl_ptpool), GFP_KERNEL);
	if (!pool) {
		KGSL_CORE_ERR("Failed to allocate memory "
				"for ptpool\n");
		return NULL;
	}

	pool->ptsize = ptsize;
	mutex_init(&pool->lock);
	INIT_LIST_HEAD(&pool->list);

	if (entries) {
		ret = kgsl_ptpool_add(pool, entries);
		if (ret)
			goto err_ptpool_remove;
	}

	ret = sysfs_create_group(kgsl_driver.ptkobj, &ptpool_attr_group);
	if (ret) {
		KGSL_CORE_ERR("sysfs_create_group failed for ptpool "
				"statistics: %d\n", ret);
		goto err_ptpool_remove;
	}
	return (void *)pool;

err_ptpool_remove:
	kgsl_gpummu_ptpool_destroy(pool);
	return NULL;
}

int kgsl_gpummu_pt_equal(struct kgsl_pagetable *pt,
			unsigned int pt_base)
{
	struct kgsl_gpummu_pt *gpummu_pt = pt ? pt->priv : NULL;
	return gpummu_pt && pt_base && (gpummu_pt->base.gpuaddr == pt_base);
}

void kgsl_gpummu_destroy_pagetable(void *mmu_specific_pt)
{
	struct kgsl_gpummu_pt *gpummu_pt = (struct kgsl_gpummu_pt *)
		mmu_specific_pt;
	kgsl_ptpool_free((struct kgsl_ptpool *)kgsl_driver.ptpool,
			gpummu_pt->base.hostptr);

	kgsl_driver.stats.coherent -= KGSL_PAGETABLE_SIZE;

	kfree(gpummu_pt->tlbflushfilter.base);

	kfree(gpummu_pt);
}

static inline uint32_t
kgsl_pt_entry_get(unsigned int va_base, uint32_t va)
{
	return (va - va_base) >> PAGE_SHIFT;
}
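
/*
 * Worked example for the index math above (4K pages assumed): a GPU
 * address of KGSL_PAGETABLE_BASE + 0x5000 gives (0x5000 >> 12) = PTE 5.
 */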

static inline void
kgsl_pt_map_set(struct kgsl_gpummu_pt *pt, uint32_t pte, uint32_t val)
{
	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
	BUG_ON(pte * sizeof(uint32_t) >= pt->base.size);
	baseptr[pte] = val;
}

static inline uint32_t
kgsl_pt_map_get(struct kgsl_gpummu_pt *pt, uint32_t pte)
{
	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
	BUG_ON(pte * sizeof(uint32_t) >= pt->base.size);
	return baseptr[pte] & GSL_PT_PAGE_ADDR_MASK;
}
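
/*
 * Each PTE is a single 32-bit word: the page-aligned physical address in
 * the bits covered by GSL_PT_PAGE_ADDR_MASK, with the low bits carrying
 * flags such as the protection bits OR'd in by kgsl_gpummu_map() and the
 * GSL_PT_PAGE_DIRTY marker written by kgsl_gpummu_unmap(). That is why
 * kgsl_pt_map_get() masks the flags off before returning the address.
 */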

static void kgsl_gpummu_pagefault(struct kgsl_mmu *mmu)
{
	unsigned int reg;
	unsigned int ptbase;

	kgsl_regread(mmu->device, MH_MMU_PAGE_FAULT, &reg);
	kgsl_regread(mmu->device, MH_MMU_PT_BASE, &ptbase);

	KGSL_MEM_CRIT(mmu->device,
			"mmu page fault: page=0x%lx pt=%d op=%s axi=%d\n",
			reg & ~(PAGE_SIZE - 1),
			kgsl_mmu_get_ptname_from_ptbase(ptbase),
			reg & 0x02 ? "WRITE" : "READ", (reg >> 4) & 0xF);
}

static void *kgsl_gpummu_create_pagetable(void)
{
	struct kgsl_gpummu_pt *gpummu_pt;

	gpummu_pt = kzalloc(sizeof(struct kgsl_gpummu_pt),
			GFP_KERNEL);
	if (!gpummu_pt)
		return NULL;

	gpummu_pt->last_superpte = 0;

	/* The flush filter keeps one dirty bit per superpte (a group of
	   GSL_PT_SUPER_PTE pages), so the byte size is the number of
	   superptes in the VA range divided by 8, plus one for rounding */
	gpummu_pt->tlbflushfilter.size = (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE /
				(PAGE_SIZE * GSL_PT_SUPER_PTE * 8)) + 1;
	gpummu_pt->tlbflushfilter.base = (unsigned int *)
			kzalloc(gpummu_pt->tlbflushfilter.size, GFP_KERNEL);
	if (!gpummu_pt->tlbflushfilter.base) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
			gpummu_pt->tlbflushfilter.size);
		goto err_free_gpummu;
	}
	GSL_TLBFLUSH_FILTER_RESET();

	gpummu_pt->base.hostptr = kgsl_ptpool_alloc((struct kgsl_ptpool *)
						kgsl_driver.ptpool,
						&gpummu_pt->base.physaddr);

	if (gpummu_pt->base.hostptr == NULL)
		goto err_flushfilter;

	/* ptpool allocations are from coherent memory, so update the
	   device statistics accordingly */

	KGSL_STATS_ADD(KGSL_PAGETABLE_SIZE, kgsl_driver.stats.coherent,
			kgsl_driver.stats.coherent_max);

	/* The hardware walks the pagetable by physical address */
	gpummu_pt->base.gpuaddr = gpummu_pt->base.physaddr;
	gpummu_pt->base.size = KGSL_PAGETABLE_SIZE;

	return (void *)gpummu_pt;

err_flushfilter:
	kfree(gpummu_pt->tlbflushfilter.base);
err_free_gpummu:
	kfree(gpummu_pt);

	return NULL;
}

static void kgsl_gpummu_default_setstate(struct kgsl_mmu *mmu,
					uint32_t flags)
{
	struct kgsl_gpummu_pt *gpummu_pt;
	if (!kgsl_mmu_enabled())
		return;

	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
		kgsl_idle(mmu->device, KGSL_TIMEOUT_DEFAULT);
		gpummu_pt = mmu->hwpagetable->priv;
		kgsl_regwrite(mmu->device, MH_MMU_PT_BASE,
			gpummu_pt->base.gpuaddr);
	}

	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
		/* Invalidate all entries and the TC (translation cache) */
		kgsl_regwrite(mmu->device, MH_MMU_INVALIDATE, 0x00000003);
	}
}

static void kgsl_gpummu_setstate(struct kgsl_mmu *mmu,
				struct kgsl_pagetable *pagetable)
{
	if (mmu->flags & KGSL_FLAGS_STARTED) {
		/* If the specified pagetable is not already current,
		 * set up the MMU to use it
		 */
		if (mmu->hwpagetable != pagetable) {
			mmu->hwpagetable = pagetable;
			/* Since we do a TLB flush the tlb_flags should
			 * be cleared by calling kgsl_mmu_pt_get_flags
			 */
			kgsl_mmu_pt_get_flags(pagetable, mmu->device->id);

			/* call device specific set page table */
			kgsl_setstate(mmu, KGSL_MMUFLAGS_TLBFLUSH |
				KGSL_MMUFLAGS_PTUPDATE);
		}
	}
}

static int kgsl_gpummu_init(struct kgsl_mmu *mmu)
{
	/*
	 * initialize device mmu
	 *
	 * call this with the global lock held
	 */
	int status = 0;

	/* sub-client MMU lookups require address translation */
	if ((mmu->config & ~0x1) > 0) {
		/* make sure the virtual address range is a multiple of 64KB */
		if (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE & ((1 << 16) - 1)) {
			KGSL_CORE_ERR("Invalid pagetable size requested "
				"for GPUMMU: %x\n",
				CONFIG_MSM_KGSL_PAGE_TABLE_SIZE);
			return -EINVAL;
		}
	}

	dev_info(mmu->device->dev, "|%s| MMU type set for device is GPUMMU\n",
			__func__);
	return status;
}

static int kgsl_gpummu_start(struct kgsl_mmu *mmu)
{
	/*
	 * initialize device mmu
	 *
	 * call this with the global lock held
	 */

	struct kgsl_device *device = mmu->device;
	struct kgsl_gpummu_pt *gpummu_pt;

	if (mmu->flags & KGSL_FLAGS_STARTED)
		return 0;

	/* MMU not enabled */
	if ((mmu->config & 0x1) == 0)
		return 0;

	/* setup MMU and sub-client behavior */
	kgsl_regwrite(device, MH_MMU_CONFIG, mmu->config);

	/* idle device */
	kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);

	/* enable axi interrupts */
	kgsl_regwrite(device, MH_INTERRUPT_MASK,
			GSL_MMU_INT_MASK | MH_INTERRUPT_MASK__MMU_PAGE_FAULT);

	kgsl_sharedmem_set(&mmu->setstate_memory, 0, 0,
			mmu->setstate_memory.size);

	/* TRAN_ERROR needs a 32 byte (32 byte aligned) chunk of memory
	 * to complete transactions in case of an MMU fault. Note that
	 * we'll leave the bottom 32 bytes of the setstate_memory for other
	 * purposes (e.g. use it when dummy read cycles are needed
	 * for other blocks) */
	kgsl_regwrite(device, MH_MMU_TRAN_ERROR,
			mmu->setstate_memory.physaddr + 32);

	if (mmu->defaultpagetable == NULL)
		mmu->defaultpagetable =
			kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);

	/* Return error if the default pagetable doesn't exist */
	if (mmu->defaultpagetable == NULL)
		return -ENOMEM;

	mmu->hwpagetable = mmu->defaultpagetable;
	gpummu_pt = mmu->hwpagetable->priv;
	kgsl_regwrite(mmu->device, MH_MMU_PT_BASE,
			gpummu_pt->base.gpuaddr);
	/* VA_RANGE packs the 64KB-aligned base address together with the
	 * range size expressed as a count of 64KB units in the low bits */
	kgsl_regwrite(mmu->device, MH_MMU_VA_RANGE,
			(KGSL_PAGETABLE_BASE |
			(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE >> 16)));
	kgsl_setstate(mmu, KGSL_MMUFLAGS_TLBFLUSH);
	mmu->flags |= KGSL_FLAGS_STARTED;

	return 0;
}

static int
kgsl_gpummu_unmap(void *mmu_specific_pt,
		struct kgsl_memdesc *memdesc)
{
	unsigned int numpages;
	unsigned int pte, ptefirst, ptelast, superpte;
	unsigned int range = memdesc->size;
	struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt;

	/* All GPU addresses as assigned are page aligned, but some
	   functions perturb the gpuaddr with an offset, so apply the
	   mask here to make sure we have the right address */

	unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;

	numpages = (range >> PAGE_SHIFT);
	if (range & (PAGE_SIZE - 1))
		numpages++;

	ptefirst = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, gpuaddr);
	ptelast = ptefirst + numpages;

	superpte = ptefirst - (ptefirst & (GSL_PT_SUPER_PTE - 1));
	GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / GSL_PT_SUPER_PTE);
	for (pte = ptefirst; pte < ptelast; pte++) {
#ifdef VERBOSE_DEBUG
		/* check if PTE exists */
		if (!kgsl_pt_map_get(gpummu_pt, pte))
			KGSL_CORE_ERR("pt entry %x is already "
				"unmapped for pagetable %p\n", pte, gpummu_pt);
#endif
		kgsl_pt_map_set(gpummu_pt, pte, GSL_PT_PAGE_DIRTY);
		superpte = pte - (pte & (GSL_PT_SUPER_PTE - 1));
		if (pte == superpte)
			GSL_TLBFLUSH_FILTER_SETDIRTY(superpte /
				GSL_PT_SUPER_PTE);
	}

	/* Post all writes to the pagetable */
	wmb();

	return 0;
}

#define SUPERPTE_IS_DIRTY(_p) \
	(((_p) & (GSL_PT_SUPER_PTE - 1)) == 0 && \
	GSL_TLBFLUSH_FILTER_ISDIRTY((_p) / GSL_PT_SUPER_PTE))
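
/*
 * A PTE is "dirty" in the flush-filter sense only when it is the first
 * entry of its superpte group and that group's filter bit is set. For
 * example, with GSL_PT_SUPER_PTE == 8 (an assumed value for illustration),
 * PTE 16 starts the third group, so SUPERPTE_IS_DIRTY(16) tests filter
 * bit 2, while SUPERPTE_IS_DIRTY(17) is always false.
 */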

static int
kgsl_gpummu_map(void *mmu_specific_pt,
		struct kgsl_memdesc *memdesc,
		unsigned int protflags,
		unsigned int *tlb_flags)
{
	unsigned int pte;
	struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt;
	struct scatterlist *s;
	int flushtlb = 0;
	int i;

	pte = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, memdesc->gpuaddr);

	/* Flush the TLB if the first PTE isn't at the superpte boundary */
	if (pte & (GSL_PT_SUPER_PTE - 1))
		flushtlb = 1;

	for_each_sg(memdesc->sg, s, memdesc->sglen, i) {
		unsigned int paddr = kgsl_get_sg_pa(s);
		unsigned int j;

		/* Each sg entry might be multiple pages long */
		for (j = paddr; j < paddr + s->length; pte++, j += PAGE_SIZE) {
			if (SUPERPTE_IS_DIRTY(pte))
				flushtlb = 1;
			kgsl_pt_map_set(gpummu_pt, pte, j | protflags);
		}
	}

	/* Flush the TLB if the last PTE isn't at the superpte boundary */
	if ((pte + 1) & (GSL_PT_SUPER_PTE - 1))
		flushtlb = 1;

	wmb();

	if (flushtlb) {
		/* set all devices as needing flushing */
		*tlb_flags = UINT_MAX;
		GSL_TLBFLUSH_FILTER_RESET();
	}

	return 0;
}

static void kgsl_gpummu_stop(struct kgsl_mmu *mmu)
{
	kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000000);
	mmu->flags &= ~KGSL_FLAGS_STARTED;
}

static int kgsl_gpummu_close(struct kgsl_mmu *mmu)
{
	/*
	 * close device mmu
	 *
	 * call this with the global lock held
	 */
	if (mmu->setstate_memory.gpuaddr)
		kgsl_sharedmem_free(&mmu->setstate_memory);

	if (mmu->defaultpagetable)
		kgsl_mmu_putpagetable(mmu->defaultpagetable);

	return 0;
}

static unsigned int
kgsl_gpummu_get_current_ptbase(struct kgsl_mmu *mmu)
{
	unsigned int ptbase;
	kgsl_regread(mmu->device, MH_MMU_PT_BASE, &ptbase);
	return ptbase;
}

static unsigned int
kgsl_gpummu_pt_get_base_addr(struct kgsl_pagetable *pt)
{
	struct kgsl_gpummu_pt *gpummu_pt = pt->priv;
	return gpummu_pt->base.gpuaddr;
}

struct kgsl_mmu_ops gpummu_ops = {
	.mmu_init = kgsl_gpummu_init,
	.mmu_close = kgsl_gpummu_close,
	.mmu_start = kgsl_gpummu_start,
	.mmu_stop = kgsl_gpummu_stop,
	.mmu_setstate = kgsl_gpummu_setstate,
	.mmu_device_setstate = kgsl_gpummu_default_setstate,
	.mmu_pagefault = kgsl_gpummu_pagefault,
	.mmu_get_current_ptbase = kgsl_gpummu_get_current_ptbase,
	.mmu_enable_clk = NULL,
	.mmu_disable_clk = NULL,
	.mmu_get_hwpagetable_asid = NULL,
	.mmu_get_pt_lsb = NULL,
};

struct kgsl_mmu_pt_ops gpummu_pt_ops = {
	.mmu_map = kgsl_gpummu_map,
	.mmu_unmap = kgsl_gpummu_unmap,
	.mmu_create_pagetable = kgsl_gpummu_create_pagetable,
	.mmu_destroy_pagetable = kgsl_gpummu_destroy_pagetable,
	.mmu_pt_equal = kgsl_gpummu_pt_equal,
	.mmu_pt_get_base_addr = kgsl_gpummu_pt_get_base_addr,
};