/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "kgsl.h"
#include "kgsl_mmu.h"
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"

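/*
 * Size in bytes of a single pagetable: one pagetable entry per page of
 * the configured GPU virtual address range, rounded up to a whole page.
 */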
#define KGSL_PAGETABLE_SIZE \
	ALIGN(KGSL_PAGETABLE_ENTRIES(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE) * \
	KGSL_PAGETABLE_ENTRY_SIZE, PAGE_SIZE)

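/*
 * sysfs attributes published under kgsl_driver.ptkobj that report
 * pagetable pool statistics: total entries, static (never-freed)
 * entries, number of chunks and the size of a single pagetable.
 */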
static ssize_t
sysfs_show_ptpool_entries(struct kobject *kobj,
			  struct kobj_attribute *attr,
			  char *buf)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
		kgsl_driver.ptpool;
	return snprintf(buf, PAGE_SIZE, "%d\n", pool->entries);
}

static ssize_t
sysfs_show_ptpool_min(struct kobject *kobj,
		      struct kobj_attribute *attr,
		      char *buf)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
		kgsl_driver.ptpool;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			pool->static_entries);
}

static ssize_t
sysfs_show_ptpool_chunks(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 char *buf)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
		kgsl_driver.ptpool;
	return snprintf(buf, PAGE_SIZE, "%d\n", pool->chunks);
}

static ssize_t
sysfs_show_ptpool_ptsize(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 char *buf)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
		kgsl_driver.ptpool;
	return snprintf(buf, PAGE_SIZE, "%d\n", pool->ptsize);
}

static struct kobj_attribute attr_ptpool_entries = {
	.attr = { .name = "ptpool_entries", .mode = 0444 },
	.show = sysfs_show_ptpool_entries,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_min = {
	.attr = { .name = "ptpool_min", .mode = 0444 },
	.show = sysfs_show_ptpool_min,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_chunks = {
	.attr = { .name = "ptpool_chunks", .mode = 0444 },
	.show = sysfs_show_ptpool_chunks,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_ptsize = {
	.attr = { .name = "ptpool_ptsize", .mode = 0444 },
	.show = sysfs_show_ptpool_ptsize,
	.store = NULL,
};

static struct attribute *ptpool_attrs[] = {
	&attr_ptpool_entries.attr,
	&attr_ptpool_min.attr,
	&attr_ptpool_chunks.attr,
	&attr_ptpool_ptsize.attr,
	NULL,
};

static struct attribute_group ptpool_attr_group = {
	.attrs = ptpool_attrs,
};

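/*
 * Allocate a physically contiguous chunk of 'count' pagetables with
 * dma_alloc_coherent() and add it to the pool. A per-chunk bitmap
 * tracks which entries are in use; chunks added as dynamic may be
 * freed again once they fall empty.
 */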
static int
_kgsl_ptpool_add_entries(struct kgsl_ptpool *pool, int count, int dynamic)
{
	struct kgsl_ptpool_chunk *chunk;
	size_t size = ALIGN(count * pool->ptsize, PAGE_SIZE);

	BUG_ON(count == 0);

	if (get_order(size) >= MAX_ORDER) {
		KGSL_CORE_ERR("ptpool allocation is too big: %zu\n", size);
		return -EINVAL;
	}

	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (chunk == NULL) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n", sizeof(*chunk));
		return -ENOMEM;
	}

	chunk->size = size;
	chunk->count = count;
	chunk->dynamic = dynamic;

	chunk->data = dma_alloc_coherent(NULL, size,
					 &chunk->phys, GFP_KERNEL);

	if (chunk->data == NULL) {
		KGSL_CORE_ERR("dma_alloc_coherent(%zu) failed\n", size);
		goto err;
	}

	chunk->bitmap = kzalloc(BITS_TO_LONGS(count) * sizeof(long),
				GFP_KERNEL);

	if (chunk->bitmap == NULL) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n",
			BITS_TO_LONGS(count) * sizeof(long));
		goto err_dma;
	}

	list_add_tail(&chunk->list, &pool->list);

	pool->chunks++;
	pool->entries += count;

	if (!dynamic)
		pool->static_entries += count;

	return 0;

err_dma:
	dma_free_coherent(NULL, chunk->size, chunk->data, chunk->phys);
err:
	kfree(chunk);
	return -ENOMEM;
}

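/*
 * First-fit scan of every chunk's bitmap. On success the matching bit
 * is set, the entry's physical address is stored in *physaddr and its
 * kernel virtual address is returned; NULL means the pool is full.
 */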
static void *
_kgsl_ptpool_get_entry(struct kgsl_ptpool *pool, unsigned int *physaddr)
{
	struct kgsl_ptpool_chunk *chunk;

	list_for_each_entry(chunk, &pool->list, list) {
		int bit = find_first_zero_bit(chunk->bitmap, chunk->count);

		if (bit >= chunk->count)
			continue;

		set_bit(bit, chunk->bitmap);
		*physaddr = chunk->phys + (bit * pool->ptsize);

		return chunk->data + (bit * pool->ptsize);
	}

	return NULL;
}

/**
 * kgsl_ptpool_add
 * @pool: A pointer to a ptpool structure
 * @count: Number of entries to add
 *
 * Add static entries to the pagetable pool.
 */

static int
kgsl_ptpool_add(struct kgsl_ptpool *pool, int count)
{
	int ret = 0;
	BUG_ON(count == 0);

	mutex_lock(&pool->lock);

	/* Only 4MB can be allocated in one chunk, so larger allocations
	   need to be split into multiple sections */

	while (count) {
		int entries = ((count * pool->ptsize) > SZ_4M) ?
			SZ_4M / pool->ptsize : count;

		/* Add the entries as static, i.e. they don't ever stand
		   a chance of being removed */

		ret = _kgsl_ptpool_add_entries(pool, entries, 0);
		if (ret)
			break;

		count -= entries;
	}

	mutex_unlock(&pool->lock);
	return ret;
}

/**
 * kgsl_ptpool_alloc
 * @pool: A pointer to a ptpool structure
 * @physaddr: A pointer to store the physical address of the pagetable
 *
 * Allocate a pagetable from the pool.  Returns the virtual address
 * of the pagetable; the physical address is returned in physaddr.
 */

static void *kgsl_ptpool_alloc(struct kgsl_ptpool *pool,
			       unsigned int *physaddr)
{
	void *addr = NULL;
	int ret;

	mutex_lock(&pool->lock);
	addr = _kgsl_ptpool_get_entry(pool, physaddr);
	if (addr)
		goto done;

	/* Add a chunk for 1 more pagetable and mark it as dynamic */
	ret = _kgsl_ptpool_add_entries(pool, 1, 1);

	if (ret)
		goto done;

	addr = _kgsl_ptpool_get_entry(pool, physaddr);
done:
	mutex_unlock(&pool->lock);
	return addr;
}

static inline void _kgsl_ptpool_rm_chunk(struct kgsl_ptpool_chunk *chunk)
{
	list_del(&chunk->list);

	if (chunk->data)
		dma_free_coherent(NULL, chunk->size, chunk->data,
			chunk->phys);
	kfree(chunk->bitmap);
	kfree(chunk);
}

/**
 * kgsl_ptpool_free
 * @pool: A pointer to a ptpool structure
 * @addr: The virtual address of the pagetable to free
 *
 * Free a pagetable allocated from the pool.
 */

static void kgsl_ptpool_free(struct kgsl_ptpool *pool, void *addr)
{
	struct kgsl_ptpool_chunk *chunk, *tmp;

	if (pool == NULL || addr == NULL)
		return;

	mutex_lock(&pool->lock);
	list_for_each_entry_safe(chunk, tmp, &pool->list, list) {
		if (addr >= chunk->data &&
		    addr < chunk->data + chunk->size) {
			int bit = ((unsigned long) (addr - chunk->data)) /
				pool->ptsize;

			clear_bit(bit, chunk->bitmap);
			memset(addr, 0, pool->ptsize);

			if (chunk->dynamic &&
				bitmap_empty(chunk->bitmap, chunk->count))
				_kgsl_ptpool_rm_chunk(chunk);

			break;
		}
	}

	mutex_unlock(&pool->lock);
}

void kgsl_gpummu_ptpool_destroy(void *ptpool)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)ptpool;
	struct kgsl_ptpool_chunk *chunk, *tmp;

	if (pool == NULL)
		return;

	mutex_lock(&pool->lock);
	list_for_each_entry_safe(chunk, tmp, &pool->list, list)
		_kgsl_ptpool_rm_chunk(chunk);
	mutex_unlock(&pool->lock);

	kfree(pool);
}

/**
 * kgsl_gpummu_ptpool_init
 * @entries: The number of initial entries to add to the pool
 *
 * Initialize a pool and allocate an initial chunk of entries.
 */
void *kgsl_gpummu_ptpool_init(int entries)
{
	int ptsize = KGSL_PAGETABLE_SIZE;
	struct kgsl_ptpool *pool;
	int ret = 0;

	pool = kzalloc(sizeof(struct kgsl_ptpool), GFP_KERNEL);
	if (!pool) {
		KGSL_CORE_ERR("Failed to allocate memory for ptpool\n");
		return NULL;
	}

	pool->ptsize = ptsize;
	mutex_init(&pool->lock);
	INIT_LIST_HEAD(&pool->list);

	if (entries) {
		ret = kgsl_ptpool_add(pool, entries);
		if (ret)
			goto err_ptpool_remove;
	}

	ret = sysfs_create_group(kgsl_driver.ptkobj, &ptpool_attr_group);
	if (ret) {
		KGSL_CORE_ERR("sysfs_create_group failed for ptpool "
			"statistics: %d\n", ret);
		goto err_ptpool_remove;
	}
	return (void *)pool;

err_ptpool_remove:
	kgsl_gpummu_ptpool_destroy(pool);
	return NULL;
}

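/*
 * Check whether a pagetable matches the hardware pagetable base read
 * back from the MMU; true only if both are valid and the addresses
 * match.
 */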
int kgsl_gpummu_pt_equal(struct kgsl_pagetable *pt,
			 unsigned int pt_base)
{
	struct kgsl_gpummu_pt *gpummu_pt = pt ? pt->priv : NULL;
	return gpummu_pt && pt_base && (gpummu_pt->base.gpuaddr == pt_base);
}

void kgsl_gpummu_destroy_pagetable(void *mmu_specific_pt)
{
	struct kgsl_gpummu_pt *gpummu_pt = (struct kgsl_gpummu_pt *)
		mmu_specific_pt;
	kgsl_ptpool_free((struct kgsl_ptpool *)kgsl_driver.ptpool,
			 gpummu_pt->base.hostptr);

	kgsl_driver.stats.coherent -= KGSL_PAGETABLE_SIZE;

	kfree(gpummu_pt->tlbflushfilter.base);

	kfree(gpummu_pt);
}

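/* Return the index of the PTE that maps GPU virtual address 'va'. */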
static inline uint32_t
kgsl_pt_entry_get(unsigned int va_base, uint32_t va)
{
	return (va - va_base) >> PAGE_SHIFT;
}

static inline void
kgsl_pt_map_set(struct kgsl_gpummu_pt *pt, uint32_t pte, uint32_t val)
{
	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
	BUG_ON(pte*sizeof(uint32_t) >= pt->base.size);
	baseptr[pte] = val;
}

static inline uint32_t
kgsl_pt_map_get(struct kgsl_gpummu_pt *pt, uint32_t pte)
{
	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
	BUG_ON(pte*sizeof(uint32_t) >= pt->base.size);
	return baseptr[pte] & GSL_PT_PAGE_ADDR_MASK;
}

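/*
 * Handle an MMU page fault interrupt: read MH_MMU_PAGE_FAULT and
 * MH_MMU_PT_BASE and log the faulting page, the owning pagetable and
 * the access type decoded from the fault register.
 */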
static void kgsl_gpummu_pagefault(struct kgsl_mmu *mmu)
{
	unsigned int reg;
	unsigned int ptbase;

	kgsl_regread(mmu->device, MH_MMU_PAGE_FAULT, &reg);
	kgsl_regread(mmu->device, MH_MMU_PT_BASE, &ptbase);

	KGSL_MEM_CRIT(mmu->device,
		"mmu page fault: page=0x%lx pt=%d op=%s axi=%d\n",
		reg & ~(PAGE_SIZE - 1),
		kgsl_mmu_get_ptname_from_ptbase(ptbase),
		reg & 0x02 ? "WRITE" : "READ", (reg >> 4) & 0xF);
}

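/*
 * Create a new pagetable: allocate the TLB flush filter (one bit per
 * superpte group) and take the pagetable itself from the ptpool.
 */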
static void *kgsl_gpummu_create_pagetable(void)
{
	struct kgsl_gpummu_pt *gpummu_pt;

	gpummu_pt = kzalloc(sizeof(struct kgsl_gpummu_pt),
			    GFP_KERNEL);
	if (!gpummu_pt)
		return NULL;

	gpummu_pt->last_superpte = 0;

	gpummu_pt->tlbflushfilter.size = (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE /
				(PAGE_SIZE * GSL_PT_SUPER_PTE * 8)) + 1;
	gpummu_pt->tlbflushfilter.base = (unsigned int *)
		kzalloc(gpummu_pt->tlbflushfilter.size, GFP_KERNEL);
	if (!gpummu_pt->tlbflushfilter.base) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
			gpummu_pt->tlbflushfilter.size);
		goto err_free_gpummu;
	}
	GSL_TLBFLUSH_FILTER_RESET();

	gpummu_pt->base.hostptr = kgsl_ptpool_alloc((struct kgsl_ptpool *)
						kgsl_driver.ptpool,
						&gpummu_pt->base.physaddr);

	if (gpummu_pt->base.hostptr == NULL)
		goto err_flushfilter;

	/* ptpool allocations are from coherent memory, so update the
	   device statistics accordingly */

	KGSL_STATS_ADD(KGSL_PAGETABLE_SIZE, kgsl_driver.stats.coherent,
		       kgsl_driver.stats.coherent_max);

	gpummu_pt->base.gpuaddr = gpummu_pt->base.physaddr;
	gpummu_pt->base.size = KGSL_PAGETABLE_SIZE;

	return (void *)gpummu_pt;

err_flushfilter:
	kfree(gpummu_pt->tlbflushfilter.base);
err_free_gpummu:
	kfree(gpummu_pt);

	return NULL;
}

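/*
 * Program the MMU registers directly: idle the GPU and switch
 * MH_MMU_PT_BASE to the current hwpagetable, and/or invalidate the TLB
 * and translation cache, depending on the flags.
 */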
static void kgsl_gpummu_default_setstate(struct kgsl_mmu *mmu,
					uint32_t flags)
{
	struct kgsl_gpummu_pt *gpummu_pt;
	if (!kgsl_mmu_enabled())
		return;

	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
		kgsl_idle(mmu->device, KGSL_TIMEOUT_DEFAULT);
		gpummu_pt = mmu->hwpagetable->priv;
		kgsl_regwrite(mmu->device, MH_MMU_PT_BASE,
			      gpummu_pt->base.gpuaddr);
	}

	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
		/* Invalidate all and tc */
		kgsl_regwrite(mmu->device, MH_MMU_INVALIDATE, 0x00000003);
	}
}

static void kgsl_gpummu_setstate(struct kgsl_mmu *mmu,
				struct kgsl_pagetable *pagetable)
{
	if (mmu->flags & KGSL_FLAGS_STARTED) {
		/* If the specified page table is not current, set up
		 * the MMU to use it
		 */
		if (mmu->hwpagetable != pagetable) {
			mmu->hwpagetable = pagetable;
			/* Since we do a TLB flush the tlb_flags should
			 * be cleared by calling kgsl_mmu_pt_get_flags
			 */
			kgsl_mmu_pt_get_flags(pagetable, mmu->device->id);

			/* call device specific set page table */
			kgsl_setstate(mmu, KGSL_MMUFLAGS_TLBFLUSH |
				KGSL_MMUFLAGS_PTUPDATE);
		}
	}
}

static int kgsl_gpummu_init(struct kgsl_mmu *mmu)
{
	/*
	 * initialize device mmu
	 *
	 * call this with the global lock held
	 */
	int status = 0;

	/* sub-client MMU lookups require address translation */
	if ((mmu->config & ~0x1) > 0) {
		/* make sure virtual address range is a multiple of 64KB */
		if (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE & ((1 << 16) - 1)) {
			KGSL_CORE_ERR("Invalid pagetable size requested "
			"for GPUMMU: %x\n", CONFIG_MSM_KGSL_PAGE_TABLE_SIZE);
			return -EINVAL;
		}

		/* allocate memory used for completing r/w operations that
		 * cannot be mapped by the MMU
		 */
		status = kgsl_allocate_contiguous(&mmu->setstate_memory, 64);
		if (!status)
			kgsl_sharedmem_set(&mmu->setstate_memory, 0, 0,
					   mmu->setstate_memory.size);
	}

	dev_info(mmu->device->dev, "|%s| MMU type set for device is GPUMMU\n",
		__func__);
	return status;
}

static int kgsl_gpummu_start(struct kgsl_mmu *mmu)
{
	/*
	 * initialize device mmu
	 *
	 * call this with the global lock held
	 */

	struct kgsl_device *device = mmu->device;
	struct kgsl_gpummu_pt *gpummu_pt;

	if (mmu->flags & KGSL_FLAGS_STARTED)
		return 0;

	/* MMU not enabled */
	if ((mmu->config & 0x1) == 0)
		return 0;

	/* setup MMU and sub-client behavior */
	kgsl_regwrite(device, MH_MMU_CONFIG, mmu->config);

	/* idle device */
	kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);

	/* enable axi interrupts */
	kgsl_regwrite(device, MH_INTERRUPT_MASK,
			GSL_MMU_INT_MASK | MH_INTERRUPT_MASK__MMU_PAGE_FAULT);

	kgsl_sharedmem_set(&mmu->setstate_memory, 0, 0,
			   mmu->setstate_memory.size);

	/* TRAN_ERROR needs a 32 byte (32 byte aligned) chunk of memory
	 * to complete transactions in case of an MMU fault. Note that
	 * we'll leave the bottom 32 bytes of the setstate_memory for other
	 * purposes (e.g. use it when dummy read cycles are needed
	 * for other blocks) */
	kgsl_regwrite(device, MH_MMU_TRAN_ERROR,
		mmu->setstate_memory.physaddr + 32);

	if (mmu->defaultpagetable == NULL)
		mmu->defaultpagetable =
			kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);

	/* Return error if the default pagetable doesn't exist */
	if (mmu->defaultpagetable == NULL)
		return -ENOMEM;

	mmu->hwpagetable = mmu->defaultpagetable;
	gpummu_pt = mmu->hwpagetable->priv;
	kgsl_regwrite(mmu->device, MH_MMU_PT_BASE,
		      gpummu_pt->base.gpuaddr);
	kgsl_regwrite(mmu->device, MH_MMU_VA_RANGE,
		      (KGSL_PAGETABLE_BASE |
		      (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE >> 16)));
	kgsl_setstate(mmu, KGSL_MMUFLAGS_TLBFLUSH);
	mmu->flags |= KGSL_FLAGS_STARTED;

	return 0;
}

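/*
 * Unmap a buffer range: each PTE is reset to GSL_PT_PAGE_DIRTY and the
 * superpte groups it spans are marked dirty in the TLB flush filter so
 * stale translations are flushed before those entries are reused.
 */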
static int
kgsl_gpummu_unmap(void *mmu_specific_pt,
		struct kgsl_memdesc *memdesc)
{
	unsigned int numpages;
	unsigned int pte, ptefirst, ptelast, superpte;
	unsigned int range = memdesc->size;
	struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt;

	/* All GPU addresses as assigned are page aligned, but some
	   functions perturb the gpuaddr with an offset, so apply the
	   mask here to make sure we have the right address */

	unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;

	numpages = (range >> PAGE_SHIFT);
	if (range & (PAGE_SIZE - 1))
		numpages++;

	ptefirst = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, gpuaddr);
	ptelast = ptefirst + numpages;

	superpte = ptefirst - (ptefirst & (GSL_PT_SUPER_PTE-1));
	GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / GSL_PT_SUPER_PTE);
	for (pte = ptefirst; pte < ptelast; pte++) {
#ifdef VERBOSE_DEBUG
		/* check if PTE exists */
		if (!kgsl_pt_map_get(gpummu_pt, pte))
			KGSL_CORE_ERR("pt entry %x is already "
				"unmapped for pagetable %p\n", pte, gpummu_pt);
#endif
		kgsl_pt_map_set(gpummu_pt, pte, GSL_PT_PAGE_DIRTY);
		superpte = pte - (pte & (GSL_PT_SUPER_PTE - 1));
		if (pte == superpte)
			GSL_TLBFLUSH_FILTER_SETDIRTY(superpte /
				GSL_PT_SUPER_PTE);
	}

	/* Post all writes to the pagetable */
	wmb();

	return 0;
}

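/*
 * A PTE is "dirty" when it starts a superpte group whose bit is set in
 * the TLB flush filter.
 */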
#define SUPERPTE_IS_DIRTY(_p) \
	(((_p) & (GSL_PT_SUPER_PTE - 1)) == 0 && \
	GSL_TLBFLUSH_FILTER_ISDIRTY((_p) / GSL_PT_SUPER_PTE))

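/*
 * Map a scatter-gather list into the pagetable one page at a time.
 * A TLB flush is only requested when the mapping starts or ends off a
 * superpte boundary, or touches a superpte group marked dirty by a
 * previous unmap.
 */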
static int
kgsl_gpummu_map(void *mmu_specific_pt,
		struct kgsl_memdesc *memdesc,
		unsigned int protflags,
		unsigned int *tlb_flags)
{
	unsigned int pte;
	struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt;
	struct scatterlist *s;
	int flushtlb = 0;
	int i;

	pte = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, memdesc->gpuaddr);

	/* Flush the TLB if the first PTE isn't at the superpte boundary */
	if (pte & (GSL_PT_SUPER_PTE - 1))
		flushtlb = 1;

	for_each_sg(memdesc->sg, s, memdesc->sglen, i) {
		unsigned int paddr = kgsl_get_sg_pa(s);
		unsigned int j;

		/* Each sg entry might be multiple pages long */
		for (j = paddr; j < paddr + s->length; pte++, j += PAGE_SIZE) {
			if (SUPERPTE_IS_DIRTY(pte))
				flushtlb = 1;
			kgsl_pt_map_set(gpummu_pt, pte, j | protflags);
		}
	}

	/* Flush the TLB if the last PTE isn't at the superpte boundary */
	if ((pte + 1) & (GSL_PT_SUPER_PTE - 1))
		flushtlb = 1;

	wmb();

	if (flushtlb) {
		/* set all devices as needing flushing */
		*tlb_flags = UINT_MAX;
		GSL_TLBFLUSH_FILTER_RESET();
	}

	return 0;
}

static void kgsl_gpummu_stop(struct kgsl_mmu *mmu)
{
	kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000000);
	mmu->flags &= ~KGSL_FLAGS_STARTED;
}

static int kgsl_gpummu_close(struct kgsl_mmu *mmu)
{
	/*
	 * close device mmu
	 *
	 * call this with the global lock held
	 */
	if (mmu->setstate_memory.gpuaddr)
		kgsl_sharedmem_free(&mmu->setstate_memory);

	if (mmu->defaultpagetable)
		kgsl_mmu_putpagetable(mmu->defaultpagetable);

	return 0;
}

static unsigned int
kgsl_gpummu_get_current_ptbase(struct kgsl_mmu *mmu)
{
	unsigned int ptbase;
	kgsl_regread(mmu->device, MH_MMU_PT_BASE, &ptbase);
	return ptbase;
}

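/* Operation tables binding the GPUMMU implementation into the KGSL MMU layer. */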
struct kgsl_mmu_ops gpummu_ops = {
	.mmu_init = kgsl_gpummu_init,
	.mmu_close = kgsl_gpummu_close,
	.mmu_start = kgsl_gpummu_start,
	.mmu_stop = kgsl_gpummu_stop,
	.mmu_setstate = kgsl_gpummu_setstate,
	.mmu_device_setstate = kgsl_gpummu_default_setstate,
	.mmu_pagefault = kgsl_gpummu_pagefault,
	.mmu_get_current_ptbase = kgsl_gpummu_get_current_ptbase,
};

struct kgsl_mmu_pt_ops gpummu_pt_ops = {
	.mmu_map = kgsl_gpummu_map,
	.mmu_unmap = kgsl_gpummu_unmap,
	.mmu_create_pagetable = kgsl_gpummu_create_pagetable,
	.mmu_destroy_pagetable = kgsl_gpummu_destroy_pagetable,
	.mmu_pt_equal = kgsl_gpummu_pt_equal,
};