/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "kgsl.h"
#include "kgsl_mmu.h"
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"
#include "kgsl_trace.h"

#define KGSL_PAGETABLE_SIZE \
	ALIGN(KGSL_PAGETABLE_ENTRIES(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE) * \
	KGSL_PAGETABLE_ENTRY_SIZE, PAGE_SIZE)

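/*
 * sysfs files published under kgsl_driver.ptkobj that report ptpool
 * usage: total entries, static (minimum) entries, number of chunks
 * and the size of a single pagetable.
 */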
static ssize_t
sysfs_show_ptpool_entries(struct kobject *kobj,
			  struct kobj_attribute *attr,
			  char *buf)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
		kgsl_driver.ptpool;
	return snprintf(buf, PAGE_SIZE, "%d\n", pool->entries);
}

static ssize_t
sysfs_show_ptpool_min(struct kobject *kobj,
		      struct kobj_attribute *attr,
		      char *buf)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
		kgsl_driver.ptpool;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			pool->static_entries);
}

static ssize_t
sysfs_show_ptpool_chunks(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 char *buf)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
		kgsl_driver.ptpool;
	return snprintf(buf, PAGE_SIZE, "%d\n", pool->chunks);
}

static ssize_t
sysfs_show_ptpool_ptsize(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 char *buf)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
		kgsl_driver.ptpool;
	return snprintf(buf, PAGE_SIZE, "%d\n", pool->ptsize);
}

static struct kobj_attribute attr_ptpool_entries = {
	.attr = { .name = "ptpool_entries", .mode = 0444 },
	.show = sysfs_show_ptpool_entries,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_min = {
	.attr = { .name = "ptpool_min", .mode = 0444 },
	.show = sysfs_show_ptpool_min,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_chunks = {
	.attr = { .name = "ptpool_chunks", .mode = 0444 },
	.show = sysfs_show_ptpool_chunks,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_ptsize = {
	.attr = { .name = "ptpool_ptsize", .mode = 0444 },
	.show = sysfs_show_ptpool_ptsize,
	.store = NULL,
};

static struct attribute *ptpool_attrs[] = {
	&attr_ptpool_entries.attr,
	&attr_ptpool_min.attr,
	&attr_ptpool_chunks.attr,
	&attr_ptpool_ptsize.attr,
	NULL,
};

static struct attribute_group ptpool_attr_group = {
	.attrs = ptpool_attrs,
};

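/**
 * _kgsl_ptpool_add_entries
 * @pool: A pointer to a ptpool structure
 * @count: Number of entries to add to the pool
 * @dynamic: Non-zero if the chunk may be freed again once it is empty
 *
 * Allocate a DMA-coherent chunk large enough to hold count pagetables
 * and add it to the pool. Caller must hold pool->lock.
 */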
static int
_kgsl_ptpool_add_entries(struct kgsl_ptpool *pool, int count, int dynamic)
{
	struct kgsl_ptpool_chunk *chunk;
	size_t size = ALIGN(count * pool->ptsize, PAGE_SIZE);

	BUG_ON(count == 0);

	if (get_order(size) >= MAX_ORDER) {
		KGSL_CORE_ERR("ptpool allocation is too big: %zu\n", size);
		return -EINVAL;
	}

	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (chunk == NULL) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n", sizeof(*chunk));
		return -ENOMEM;
	}

	chunk->size = size;
	chunk->count = count;
	chunk->dynamic = dynamic;

	chunk->data = dma_alloc_coherent(NULL, size,
					 &chunk->phys, GFP_KERNEL);

	if (chunk->data == NULL) {
		KGSL_CORE_ERR("dma_alloc_coherent(%zu) failed\n", size);
		goto err;
	}

	chunk->bitmap = kzalloc(BITS_TO_LONGS(count) * sizeof(unsigned long),
				GFP_KERNEL);

	if (chunk->bitmap == NULL) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n",
			BITS_TO_LONGS(count) * sizeof(unsigned long));
		goto err_dma;
	}

	list_add_tail(&chunk->list, &pool->list);

	pool->chunks++;
	pool->entries += count;

	if (!dynamic)
		pool->static_entries += count;

	return 0;

err_dma:
	dma_free_coherent(NULL, chunk->size, chunk->data, chunk->phys);
err:
	kfree(chunk);
	return -ENOMEM;
}

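/**
 * _kgsl_ptpool_get_entry
 * @pool: A pointer to a ptpool structure
 * @physaddr: A pointer to store the physical address of the entry
 *
 * Find the first free pagetable slot in the pool, mark it as used and
 * return its virtual address, or NULL if all chunks are full.
 * Caller must hold pool->lock.
 */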
static void *
_kgsl_ptpool_get_entry(struct kgsl_ptpool *pool, unsigned int *physaddr)
{
	struct kgsl_ptpool_chunk *chunk;

	list_for_each_entry(chunk, &pool->list, list) {
		int bit = find_first_zero_bit(chunk->bitmap, chunk->count);

		if (bit >= chunk->count)
			continue;

		set_bit(bit, chunk->bitmap);
		*physaddr = chunk->phys + (bit * pool->ptsize);

		return chunk->data + (bit * pool->ptsize);
	}

	return NULL;
}

/**
 * kgsl_ptpool_add
 * @pool: A pointer to a ptpool structure
 * @count: Number of entries to add
 *
 * Add static entries to the pagetable pool.
 */

static int
kgsl_ptpool_add(struct kgsl_ptpool *pool, int count)
{
	int ret = 0;
	BUG_ON(count == 0);

	mutex_lock(&pool->lock);

	/* Only 4MB can be allocated in one chunk, so larger allocations
	   need to be split into multiple sections */

	while (count) {
		int entries = ((count * pool->ptsize) > SZ_4M) ?
			SZ_4M / pool->ptsize : count;

		/* Add the entries as static, i.e. they don't ever stand
		   a chance of being removed */

		ret = _kgsl_ptpool_add_entries(pool, entries, 0);
		if (ret)
			break;

		count -= entries;
	}

	mutex_unlock(&pool->lock);
	return ret;
}

/**
 * kgsl_ptpool_alloc
 * @pool: A pointer to a ptpool structure
 * @physaddr: A pointer to store the physical address of the pagetable
 *
 * Allocate a pagetable from the pool. Returns the virtual address
 * of the pagetable; the physical address is returned in physaddr.
 */

static void *kgsl_ptpool_alloc(struct kgsl_ptpool *pool,
			       unsigned int *physaddr)
{
	void *addr = NULL;
	int ret;

	mutex_lock(&pool->lock);
	addr = _kgsl_ptpool_get_entry(pool, physaddr);
	if (addr)
		goto done;

	/* Add a chunk for 1 more pagetable and mark it as dynamic */
	ret = _kgsl_ptpool_add_entries(pool, 1, 1);

	if (ret)
		goto done;

	addr = _kgsl_ptpool_get_entry(pool, physaddr);
done:
	mutex_unlock(&pool->lock);
	return addr;
}

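/* Unlink a chunk from the pool and free its bitmap, its coherent
 * memory and the chunk structure itself. Caller must hold pool->lock.
 */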
static inline void _kgsl_ptpool_rm_chunk(struct kgsl_ptpool_chunk *chunk)
{
	list_del(&chunk->list);

	if (chunk->data)
		dma_free_coherent(NULL, chunk->size, chunk->data,
			chunk->phys);
	kfree(chunk->bitmap);
	kfree(chunk);
}

/**
 * kgsl_ptpool_free
 * @pool: A pointer to a ptpool structure
 * @addr: The virtual address of the pagetable to free
 *
 * Free a pagetable allocated from the pool
 */

static void kgsl_ptpool_free(struct kgsl_ptpool *pool, void *addr)
{
	struct kgsl_ptpool_chunk *chunk, *tmp;

	if (pool == NULL || addr == NULL)
		return;

	mutex_lock(&pool->lock);
	list_for_each_entry_safe(chunk, tmp, &pool->list, list) {
		if (addr >= chunk->data &&
		    addr < chunk->data + chunk->size) {
			int bit = ((unsigned long) (addr - chunk->data)) /
				pool->ptsize;

			clear_bit(bit, chunk->bitmap);
			memset(addr, 0, pool->ptsize);

			if (chunk->dynamic &&
			    bitmap_empty(chunk->bitmap, chunk->count))
				_kgsl_ptpool_rm_chunk(chunk);

			break;
		}
	}

	mutex_unlock(&pool->lock);
}

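/**
 * kgsl_gpummu_ptpool_destroy
 * @ptpool: The pool to destroy
 *
 * Remove and free every chunk in the pool, then free the pool itself.
 */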
void kgsl_gpummu_ptpool_destroy(void *ptpool)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)ptpool;
	struct kgsl_ptpool_chunk *chunk, *tmp;

	if (pool == NULL)
		return;

	mutex_lock(&pool->lock);
	list_for_each_entry_safe(chunk, tmp, &pool->list, list)
		_kgsl_ptpool_rm_chunk(chunk);
	mutex_unlock(&pool->lock);

	kfree(pool);
}

/**
 * kgsl_gpummu_ptpool_init
 * @entries: The number of initial entries to add to the pool
 *
 * Initialize a pool and allocate an initial chunk of entries.
 */
void *kgsl_gpummu_ptpool_init(int entries)
{
	int ptsize = KGSL_PAGETABLE_SIZE;
	struct kgsl_ptpool *pool;
	int ret = 0;

	pool = kzalloc(sizeof(struct kgsl_ptpool), GFP_KERNEL);
	if (!pool) {
		KGSL_CORE_ERR("Failed to allocate memory "
			"for ptpool\n");
		return NULL;
	}

	pool->ptsize = ptsize;
	mutex_init(&pool->lock);
	INIT_LIST_HEAD(&pool->list);

	if (entries) {
		ret = kgsl_ptpool_add(pool, entries);
		if (ret)
			goto err_ptpool_remove;
	}

	ret = sysfs_create_group(kgsl_driver.ptkobj, &ptpool_attr_group);
	if (ret) {
		KGSL_CORE_ERR("sysfs_create_group failed for ptpool "
			"statistics: %d\n", ret);
		goto err_ptpool_remove;
	}
	return (void *)pool;

err_ptpool_remove:
	kgsl_gpummu_ptpool_destroy(pool);
	return NULL;
}

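/**
 * kgsl_gpummu_pt_equal
 * @mmu: The mmu device
 * @pt: The pagetable to check
 * @pt_base: A hardware pagetable base address
 *
 * Return non-zero if pt is the pagetable whose base address is
 * programmed as pt_base.
 */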
int kgsl_gpummu_pt_equal(struct kgsl_mmu *mmu,
			struct kgsl_pagetable *pt,
			unsigned int pt_base)
{
	struct kgsl_gpummu_pt *gpummu_pt = pt ? pt->priv : NULL;
	return gpummu_pt && pt_base && (gpummu_pt->base.gpuaddr == pt_base);
}

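/**
 * kgsl_gpummu_destroy_pagetable
 * @mmu_specific_pt: The GPUMMU private pagetable structure
 *
 * Return the pagetable memory to the ptpool and free the TLB flush
 * filter along with the structure itself.
 */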
void kgsl_gpummu_destroy_pagetable(void *mmu_specific_pt)
{
	struct kgsl_gpummu_pt *gpummu_pt = (struct kgsl_gpummu_pt *)
		mmu_specific_pt;
	kgsl_ptpool_free((struct kgsl_ptpool *)kgsl_driver.ptpool,
			gpummu_pt->base.hostptr);

	kgsl_driver.stats.coherent -= KGSL_PAGETABLE_SIZE;

	kfree(gpummu_pt->tlbflushfilter.base);

	kfree(gpummu_pt);
}

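/*
 * Helpers to convert a GPU virtual address into a pagetable entry
 * index and to read or write a single entry in the pagetable.
 */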
static inline uint32_t
kgsl_pt_entry_get(unsigned int va_base, uint32_t va)
{
	return (va - va_base) >> PAGE_SHIFT;
}

static inline void
kgsl_pt_map_set(struct kgsl_gpummu_pt *pt, uint32_t pte, uint32_t val)
{
	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
	BUG_ON(pte*sizeof(uint32_t) >= pt->base.size);
	baseptr[pte] = val;
}

static inline uint32_t
kgsl_pt_map_get(struct kgsl_gpummu_pt *pt, uint32_t pte)
{
	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
	BUG_ON(pte*sizeof(uint32_t) >= pt->base.size);
	return baseptr[pte] & GSL_PT_PAGE_ADDR_MASK;
}

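/*
 * kgsl_gpummu_pagefault - read the fault registers and log/trace the
 * faulting page, the active pagetable and the access direction.
 */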
static void kgsl_gpummu_pagefault(struct kgsl_mmu *mmu)
{
	unsigned int reg;
	unsigned int ptbase;

	kgsl_regread(mmu->device, MH_MMU_PAGE_FAULT, &reg);
	kgsl_regread(mmu->device, MH_MMU_PT_BASE, &ptbase);

	KGSL_MEM_CRIT(mmu->device,
			"mmu page fault: page=0x%lx pt=%d op=%s axi=%d\n",
			reg & ~(PAGE_SIZE - 1),
			kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase),
			reg & 0x02 ? "WRITE" : "READ", (reg >> 4) & 0xF);
	trace_kgsl_mmu_pagefault(mmu->device, reg & ~(PAGE_SIZE - 1),
			kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase),
			reg & 0x02 ? "WRITE" : "READ");
}

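/*
 * kgsl_gpummu_create_pagetable - allocate the GPUMMU private pagetable
 * structure and its TLB flush filter, with the pagetable memory itself
 * coming from the DMA-coherent ptpool.
 */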
static void *kgsl_gpummu_create_pagetable(void)
{
	struct kgsl_gpummu_pt *gpummu_pt;

	gpummu_pt = kzalloc(sizeof(struct kgsl_gpummu_pt),
			GFP_KERNEL);
	if (!gpummu_pt)
		return NULL;

	gpummu_pt->last_superpte = 0;

	gpummu_pt->tlbflushfilter.size = (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE /
				(PAGE_SIZE * GSL_PT_SUPER_PTE * 8)) + 1;
	gpummu_pt->tlbflushfilter.base = (unsigned int *)
			kzalloc(gpummu_pt->tlbflushfilter.size, GFP_KERNEL);
	if (!gpummu_pt->tlbflushfilter.base) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
			gpummu_pt->tlbflushfilter.size);
		goto err_free_gpummu;
	}
	GSL_TLBFLUSH_FILTER_RESET();

	gpummu_pt->base.hostptr = kgsl_ptpool_alloc((struct kgsl_ptpool *)
						kgsl_driver.ptpool,
						&gpummu_pt->base.physaddr);

	if (gpummu_pt->base.hostptr == NULL)
		goto err_flushfilter;

	/* ptpool allocations are from coherent memory, so update the
	   device statistics accordingly */

	KGSL_STATS_ADD(KGSL_PAGETABLE_SIZE, kgsl_driver.stats.coherent,
		       kgsl_driver.stats.coherent_max);

	gpummu_pt->base.gpuaddr = gpummu_pt->base.physaddr;
	gpummu_pt->base.size = KGSL_PAGETABLE_SIZE;

	return (void *)gpummu_pt;

err_flushfilter:
	kfree(gpummu_pt->tlbflushfilter.base);
err_free_gpummu:
	kfree(gpummu_pt);

	return NULL;
}

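/*
 * kgsl_gpummu_default_setstate - apply state changes by writing the MMU
 * registers directly: reprogram the pagetable base (after idling the
 * GPU) and/or invalidate the TLB and translation cache.
 */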
static void kgsl_gpummu_default_setstate(struct kgsl_mmu *mmu,
					uint32_t flags)
{
	struct kgsl_gpummu_pt *gpummu_pt;
	if (!kgsl_mmu_enabled())
		return;

	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
		kgsl_idle(mmu->device);
		gpummu_pt = mmu->hwpagetable->priv;
		kgsl_regwrite(mmu->device, MH_MMU_PT_BASE,
			gpummu_pt->base.gpuaddr);
	}

	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
		/* Invalidate all and tc */
		kgsl_regwrite(mmu->device, MH_MMU_INVALIDATE, 0x00000003);
	}
}

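/*
 * kgsl_gpummu_setstate - switch the hardware to a new pagetable if it
 * is not already current, flushing the TLB and reprogramming the
 * pagetable base through the device-specific setstate path.
 */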
static void kgsl_gpummu_setstate(struct kgsl_mmu *mmu,
				struct kgsl_pagetable *pagetable,
				unsigned int context_id)
{
	if (mmu->flags & KGSL_FLAGS_STARTED) {
		/* page table not current, then setup mmu to use new
		 * specified page table
		 */
		if (mmu->hwpagetable != pagetable) {
			mmu->hwpagetable = pagetable;
			/* Since we do a TLB flush the tlb_flags should
			 * be cleared by calling kgsl_mmu_pt_get_flags
			 */
			kgsl_mmu_pt_get_flags(pagetable, mmu->device->id);

			/* call device specific set page table */
			kgsl_setstate(mmu, context_id, KGSL_MMUFLAGS_TLBFLUSH |
				KGSL_MMUFLAGS_PTUPDATE);
		}
	}
}

static int kgsl_gpummu_init(struct kgsl_mmu *mmu)
{
	/*
	 * initialize device mmu
	 *
	 * call this with the global lock held
	 */
	int status = 0;

	/* sub-client MMU lookups require address translation */
	if ((mmu->config & ~0x1) > 0) {
		/* make sure virtual address range is a multiple of 64Kb */
		if (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE & ((1 << 16) - 1)) {
			KGSL_CORE_ERR("Invalid pagetable size requested "
			"for GPUMMU: %x\n", CONFIG_MSM_KGSL_PAGE_TABLE_SIZE);
			return -EINVAL;
		}
	}

	dev_info(mmu->device->dev, "|%s| MMU type set for device is GPUMMU\n",
			__func__);
	return status;
}

static int kgsl_gpummu_start(struct kgsl_mmu *mmu)
{
	/*
	 * initialize device mmu
	 *
	 * call this with the global lock held
	 */

	struct kgsl_device *device = mmu->device;
	struct kgsl_gpummu_pt *gpummu_pt;

	if (mmu->flags & KGSL_FLAGS_STARTED)
		return 0;

	/* MMU not enabled */
	if ((mmu->config & 0x1) == 0)
		return 0;

	/* setup MMU and sub-client behavior */
	kgsl_regwrite(device, MH_MMU_CONFIG, mmu->config);

	/* idle device */
	kgsl_idle(device);

	/* enable axi interrupts */
	kgsl_regwrite(device, MH_INTERRUPT_MASK,
			GSL_MMU_INT_MASK | MH_INTERRUPT_MASK__MMU_PAGE_FAULT);

	kgsl_sharedmem_set(&mmu->setstate_memory, 0, 0,
			   mmu->setstate_memory.size);

	/* TRAN_ERROR needs a 32 byte (32 byte aligned) chunk of memory
	 * to complete transactions in case of an MMU fault. Note that
	 * we'll leave the bottom 32 bytes of the setstate_memory for other
	 * purposes (e.g. use it when dummy read cycles are needed
	 * for other blocks) */
	kgsl_regwrite(device, MH_MMU_TRAN_ERROR,
			mmu->setstate_memory.physaddr + 32);

	if (mmu->defaultpagetable == NULL)
		mmu->defaultpagetable =
			kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);

	/* Return error if the default pagetable doesn't exist */
	if (mmu->defaultpagetable == NULL)
		return -ENOMEM;

	mmu->hwpagetable = mmu->defaultpagetable;
	gpummu_pt = mmu->hwpagetable->priv;
	kgsl_regwrite(mmu->device, MH_MMU_PT_BASE,
		      gpummu_pt->base.gpuaddr);
	kgsl_regwrite(mmu->device, MH_MMU_VA_RANGE,
		      (KGSL_PAGETABLE_BASE |
		      (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE >> 16)));
	kgsl_setstate(mmu, KGSL_MEMSTORE_GLOBAL, KGSL_MMUFLAGS_TLBFLUSH);
	mmu->flags |= KGSL_FLAGS_STARTED;

	return 0;
}

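/*
 * kgsl_gpummu_unmap - mark every PTE covering the memdesc as dirty and
 * record the affected superpages in the TLB flush filter so a later
 * flush is only forced when one of these entries may still be cached.
 */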
static int
kgsl_gpummu_unmap(void *mmu_specific_pt,
		struct kgsl_memdesc *memdesc,
		unsigned int *tlb_flags)
{
	unsigned int numpages;
	unsigned int pte, ptefirst, ptelast, superpte;
	unsigned int range = kgsl_sg_size(memdesc->sg, memdesc->sglen);
	struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt;

	/* All GPU addresses as assigned are page aligned, but some
	   functions perturb the gpuaddr with an offset, so apply the
	   mask here to make sure we have the right address */

	unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;

	numpages = (range >> PAGE_SHIFT);
	if (range & (PAGE_SIZE - 1))
		numpages++;

	ptefirst = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, gpuaddr);
	ptelast = ptefirst + numpages;

	superpte = ptefirst - (ptefirst & (GSL_PT_SUPER_PTE-1));
	GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / GSL_PT_SUPER_PTE);
	for (pte = ptefirst; pte < ptelast; pte++) {
#ifdef VERBOSE_DEBUG
		/* check if PTE exists */
		if (!kgsl_pt_map_get(gpummu_pt, pte))
			KGSL_CORE_ERR("pt entry %x is already "
				"unmapped for pagetable %p\n", pte, gpummu_pt);
#endif
		kgsl_pt_map_set(gpummu_pt, pte, GSL_PT_PAGE_DIRTY);
		superpte = pte - (pte & (GSL_PT_SUPER_PTE - 1));
		if (pte == superpte)
			GSL_TLBFLUSH_FILTER_SETDIRTY(superpte /
				GSL_PT_SUPER_PTE);
	}

	/* Post all writes to the pagetable */
	wmb();

	return 0;
}

#define SUPERPTE_IS_DIRTY(_p) \
(((_p) & (GSL_PT_SUPER_PTE - 1)) == 0 && \
GSL_TLBFLUSH_FILTER_ISDIRTY((_p) / GSL_PT_SUPER_PTE))

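/*
 * kgsl_gpummu_map - write a PTE for every page in the memdesc's
 * scatterlist with the given protection flags, requesting a full TLB
 * flush when a dirty superpage or an unaligned mapping boundary is
 * touched.
 */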
static int
kgsl_gpummu_map(void *mmu_specific_pt,
		struct kgsl_memdesc *memdesc,
		unsigned int protflags,
		unsigned int *tlb_flags)
{
	unsigned int pte;
	struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt;
	struct scatterlist *s;
	int flushtlb = 0;
	int i;

	pte = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, memdesc->gpuaddr);

	/* Flush the TLB if the first PTE isn't at the superpte boundary */
	if (pte & (GSL_PT_SUPER_PTE - 1))
		flushtlb = 1;

	for_each_sg(memdesc->sg, s, memdesc->sglen, i) {
		unsigned int paddr = kgsl_get_sg_pa(s);
		unsigned int j;

		/* Each sg entry might be multiple pages long */
		for (j = paddr; j < paddr + s->length; pte++, j += PAGE_SIZE) {
			if (SUPERPTE_IS_DIRTY(pte))
				flushtlb = 1;
			kgsl_pt_map_set(gpummu_pt, pte, j | protflags);
		}
	}

	/* Flush the TLB if the last PTE isn't at the superpte boundary */
	if ((pte + 1) & (GSL_PT_SUPER_PTE - 1))
		flushtlb = 1;

	wmb();

	if (flushtlb) {
		/* set all devices as needing flushing */
		*tlb_flags = UINT_MAX;
		GSL_TLBFLUSH_FILTER_RESET();
	}

	return 0;
}

static void kgsl_gpummu_stop(struct kgsl_mmu *mmu)
{
	mmu->flags &= ~KGSL_FLAGS_STARTED;
}

static int kgsl_gpummu_close(struct kgsl_mmu *mmu)
{
	/*
	 * close device mmu
	 *
	 * call this with the global lock held
	 */
	if (mmu->setstate_memory.gpuaddr)
		kgsl_sharedmem_free(&mmu->setstate_memory);

	if (mmu->defaultpagetable)
		kgsl_mmu_putpagetable(mmu->defaultpagetable);

	return 0;
}

static unsigned int
kgsl_gpummu_get_current_ptbase(struct kgsl_mmu *mmu)
{
	unsigned int ptbase;
	kgsl_regread(mmu->device, MH_MMU_PT_BASE, &ptbase);
	return ptbase;
}

static unsigned int
kgsl_gpummu_get_pt_base_addr(struct kgsl_mmu *mmu,
				struct kgsl_pagetable *pt)
{
	struct kgsl_gpummu_pt *gpummu_pt = pt->priv;
	return gpummu_pt->base.gpuaddr;
}

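/* The GPUMMU reports a single translation unit */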
static int kgsl_gpummu_get_num_iommu_units(struct kgsl_mmu *mmu)
{
	return 1;
}

struct kgsl_mmu_ops gpummu_ops = {
	.mmu_init = kgsl_gpummu_init,
	.mmu_close = kgsl_gpummu_close,
	.mmu_start = kgsl_gpummu_start,
	.mmu_stop = kgsl_gpummu_stop,
	.mmu_setstate = kgsl_gpummu_setstate,
	.mmu_device_setstate = kgsl_gpummu_default_setstate,
	.mmu_pagefault = kgsl_gpummu_pagefault,
	.mmu_get_current_ptbase = kgsl_gpummu_get_current_ptbase,
	.mmu_pt_equal = kgsl_gpummu_pt_equal,
	.mmu_get_pt_base_addr = kgsl_gpummu_get_pt_base_addr,
	.mmu_enable_clk = NULL,
	.mmu_disable_clk_on_ts = NULL,
	.mmu_get_pt_lsb = NULL,
	.mmu_get_reg_gpuaddr = NULL,
	.mmu_get_num_iommu_units = kgsl_gpummu_get_num_iommu_units,
};

struct kgsl_mmu_pt_ops gpummu_pt_ops = {
	.mmu_map = kgsl_gpummu_map,
	.mmu_unmap = kgsl_gpummu_unmap,
	.mmu_create_pagetable = kgsl_gpummu_create_pagetable,
	.mmu_destroy_pagetable = kgsl_gpummu_destroy_pagetable,
};