/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "kgsl.h"
#include "kgsl_mmu.h"
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"
#include "kgsl_trace.h"

#define KGSL_PAGETABLE_SIZE \
	ALIGN(KGSL_PAGETABLE_ENTRIES(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE) * \
	KGSL_PAGETABLE_ENTRY_SIZE, PAGE_SIZE)

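/*
 * Worked example (illustrative only, assuming one PTE per 4 KB page and
 * 4-byte entries): a hypothetical CONFIG_MSM_KGSL_PAGE_TABLE_SIZE of
 * 32 MB needs 32 MB / 4 KB = 8192 entries, so 8192 * 4 = 32 KB per
 * pagetable, which is already page aligned. KGSL_PAGETABLE_SIZE is the
 * per-pagetable backing size that the ptpool below hands out.
 */
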
static ssize_t
sysfs_show_ptpool_entries(struct kobject *kobj,
			  struct kobj_attribute *attr,
			  char *buf)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
					kgsl_driver.ptpool;
	return snprintf(buf, PAGE_SIZE, "%d\n", pool->entries);
}

static ssize_t
sysfs_show_ptpool_min(struct kobject *kobj,
		      struct kobj_attribute *attr,
		      char *buf)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
					kgsl_driver.ptpool;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			pool->static_entries);
}

static ssize_t
sysfs_show_ptpool_chunks(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 char *buf)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
					kgsl_driver.ptpool;
	return snprintf(buf, PAGE_SIZE, "%d\n", pool->chunks);
}

static ssize_t
sysfs_show_ptpool_ptsize(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 char *buf)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
					kgsl_driver.ptpool;
	return snprintf(buf, PAGE_SIZE, "%d\n", pool->ptsize);
}

static struct kobj_attribute attr_ptpool_entries = {
	.attr = { .name = "ptpool_entries", .mode = 0444 },
	.show = sysfs_show_ptpool_entries,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_min = {
	.attr = { .name = "ptpool_min", .mode = 0444 },
	.show = sysfs_show_ptpool_min,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_chunks = {
	.attr = { .name = "ptpool_chunks", .mode = 0444 },
	.show = sysfs_show_ptpool_chunks,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_ptsize = {
	.attr = { .name = "ptpool_ptsize", .mode = 0444 },
	.show = sysfs_show_ptpool_ptsize,
	.store = NULL,
};

static struct attribute *ptpool_attrs[] = {
	&attr_ptpool_entries.attr,
	&attr_ptpool_min.attr,
	&attr_ptpool_chunks.attr,
	&attr_ptpool_ptsize.attr,
	NULL,
};

static struct attribute_group ptpool_attr_group = {
	.attrs = ptpool_attrs,
};

static int
_kgsl_ptpool_add_entries(struct kgsl_ptpool *pool, int count, int dynamic)
{
	struct kgsl_ptpool_chunk *chunk;
	size_t size = ALIGN(count * pool->ptsize, PAGE_SIZE);

	BUG_ON(count == 0);

	if (get_order(size) >= MAX_ORDER) {
		KGSL_CORE_ERR("ptpool allocation is too big: %zu\n", size);
		return -EINVAL;
	}

	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (chunk == NULL) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n", sizeof(*chunk));
		return -ENOMEM;
	}

	chunk->size = size;
	chunk->count = count;
	chunk->dynamic = dynamic;

	chunk->data = dma_alloc_coherent(NULL, size,
					 &chunk->phys, GFP_KERNEL);

	if (chunk->data == NULL) {
		KGSL_CORE_ERR("dma_alloc_coherent(%zu) failed\n", size);
		goto err;
	}

	chunk->bitmap = kzalloc(BITS_TO_LONGS(count) * sizeof(unsigned long),
				GFP_KERNEL);

	if (chunk->bitmap == NULL) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n",
			      BITS_TO_LONGS(count) * sizeof(unsigned long));
		goto err_dma;
	}

	list_add_tail(&chunk->list, &pool->list);

	pool->chunks++;
	pool->entries += count;

	if (!dynamic)
		pool->static_entries += count;

	return 0;

err_dma:
	dma_free_coherent(NULL, chunk->size, chunk->data, chunk->phys);
err:
	kfree(chunk);
	return -ENOMEM;
}

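/*
 * Chunk sizing note (illustrative): a single coherent allocation is
 * limited to MAX_ORDER - 1 pages, so with the common MAX_ORDER of 11
 * and 4 KB pages one chunk tops out at 4 MB. That is why
 * kgsl_ptpool_add() below splits large requests into SZ_4M slices
 * before calling this function.
 */
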
static void *
_kgsl_ptpool_get_entry(struct kgsl_ptpool *pool, unsigned int *physaddr)
{
	struct kgsl_ptpool_chunk *chunk;

	list_for_each_entry(chunk, &pool->list, list) {
		int bit = find_first_zero_bit(chunk->bitmap, chunk->count);

		if (bit >= chunk->count)
			continue;

		set_bit(bit, chunk->bitmap);
		*physaddr = chunk->phys + (bit * pool->ptsize);

		return chunk->data + (bit * pool->ptsize);
	}

	return NULL;
}

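/*
 * A minimal sketch of the search above (illustrative only): each chunk
 * is one coherent buffer carved into pool->ptsize slots, with one
 * bitmap bit per slot. Slot N lives at chunk->data + N * ptsize (CPU
 * view) and chunk->phys + N * ptsize (GPU/bus view) - e.g. with a
 * 32 KB ptsize, bit 3 maps to byte offset 96 KB into the chunk - so
 * finding and setting the first clear bit is the whole allocation.
 */
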
/**
 * kgsl_ptpool_add
 * @pool: A pointer to a ptpool structure
 * @count: Number of entries to add
 *
 * Add static entries to the pagetable pool.
 */

static int
kgsl_ptpool_add(struct kgsl_ptpool *pool, int count)
{
	int ret = 0;
	BUG_ON(count == 0);

	mutex_lock(&pool->lock);

	/* Only 4MB can be allocated in one chunk, so larger allocations
	   need to be split into multiple sections */

	while (count) {
		int entries = ((count * pool->ptsize) > SZ_4M) ?
			SZ_4M / pool->ptsize : count;

		/* Add the entries as static, i.e. they don't ever stand
		   a chance of being removed */

		ret = _kgsl_ptpool_add_entries(pool, entries, 0);
		if (ret)
			break;

		count -= entries;
	}

	mutex_unlock(&pool->lock);
	return ret;
}

/**
 * kgsl_ptpool_alloc
 * @pool: A pointer to a ptpool structure
 * @physaddr: A pointer to store the physical address of the pagetable
 *
 * Allocate a pagetable from the pool. Returns the virtual address
 * of the pagetable; the physical address is returned in physaddr.
 */

static void *kgsl_ptpool_alloc(struct kgsl_ptpool *pool,
			       unsigned int *physaddr)
{
	void *addr = NULL;
	int ret;

	mutex_lock(&pool->lock);
	addr = _kgsl_ptpool_get_entry(pool, physaddr);
	if (addr)
		goto done;

	/* Add a chunk for 1 more pagetable and mark it as dynamic */
	ret = _kgsl_ptpool_add_entries(pool, 1, 1);

	if (ret)
		goto done;

	addr = _kgsl_ptpool_get_entry(pool, physaddr);
done:
	mutex_unlock(&pool->lock);
	return addr;
}

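/*
 * Usage sketch (illustrative, not driver code): allocation and free
 * are symmetric, e.g.
 *
 *	unsigned int phys;
 *	void *pt = kgsl_ptpool_alloc(pool, &phys);
 *
 *	if (pt != NULL) {
 *		... program PTEs through the CPU mapping 'pt' and hand
 *		    'phys' to the MMU ...
 *		kgsl_ptpool_free(pool, pt);
 *	}
 */
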
static inline void _kgsl_ptpool_rm_chunk(struct kgsl_ptpool_chunk *chunk)
{
	list_del(&chunk->list);

	if (chunk->data)
		dma_free_coherent(NULL, chunk->size, chunk->data,
				  chunk->phys);
	kfree(chunk->bitmap);
	kfree(chunk);
}

/**
 * kgsl_ptpool_free
 * @pool: A pointer to a ptpool structure
 * @addr: The virtual address to free
 *
 * Free a pagetable allocated from the pool
 */

static void kgsl_ptpool_free(struct kgsl_ptpool *pool, void *addr)
{
	struct kgsl_ptpool_chunk *chunk, *tmp;

	if (pool == NULL || addr == NULL)
		return;

	mutex_lock(&pool->lock);
	list_for_each_entry_safe(chunk, tmp, &pool->list, list) {
		if (addr >= chunk->data &&
		    addr < chunk->data + chunk->size) {
			int bit = ((unsigned long) (addr - chunk->data)) /
				  pool->ptsize;

			clear_bit(bit, chunk->bitmap);
			memset(addr, 0, pool->ptsize);

			if (chunk->dynamic &&
			    bitmap_empty(chunk->bitmap, chunk->count))
				_kgsl_ptpool_rm_chunk(chunk);

			break;
		}
	}

	mutex_unlock(&pool->lock);
}

void kgsl_gpummu_ptpool_destroy(void *ptpool)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)ptpool;
	struct kgsl_ptpool_chunk *chunk, *tmp;

	if (pool == NULL)
		return;

	mutex_lock(&pool->lock);
	list_for_each_entry_safe(chunk, tmp, &pool->list, list)
		_kgsl_ptpool_rm_chunk(chunk);
	mutex_unlock(&pool->lock);

	kfree(pool);
}

/**
 * kgsl_gpummu_ptpool_init
 * @entries: The number of initial entries to add to the pool
 *
 * Initialize a pool and allocate an initial chunk of entries.
 */
void *kgsl_gpummu_ptpool_init(int entries)
{
	int ptsize = KGSL_PAGETABLE_SIZE;
	struct kgsl_ptpool *pool;
	int ret = 0;

	pool = kzalloc(sizeof(struct kgsl_ptpool), GFP_KERNEL);
	if (!pool) {
		KGSL_CORE_ERR("Failed to allocate memory for ptpool\n");
		return NULL;
	}

	pool->ptsize = ptsize;
	mutex_init(&pool->lock);
	INIT_LIST_HEAD(&pool->list);

	if (entries) {
		ret = kgsl_ptpool_add(pool, entries);
		if (ret)
			goto err_ptpool_remove;
	}

	ret = sysfs_create_group(kgsl_driver.ptkobj, &ptpool_attr_group);
	if (ret) {
		KGSL_CORE_ERR("sysfs_create_group failed for ptpool "
			      "statistics: %d\n", ret);
		goto err_ptpool_remove;
	}
	return (void *)pool;

err_ptpool_remove:
	kgsl_gpummu_ptpool_destroy(pool);
	return NULL;
}

int kgsl_gpummu_pt_equal(struct kgsl_pagetable *pt,
			 unsigned int pt_base)
{
	struct kgsl_gpummu_pt *gpummu_pt = pt ? pt->priv : NULL;
	return gpummu_pt && pt_base && (gpummu_pt->base.gpuaddr == pt_base);
}

void kgsl_gpummu_destroy_pagetable(void *mmu_specific_pt)
{
	struct kgsl_gpummu_pt *gpummu_pt = (struct kgsl_gpummu_pt *)
						mmu_specific_pt;
	kgsl_ptpool_free((struct kgsl_ptpool *)kgsl_driver.ptpool,
			 gpummu_pt->base.hostptr);

	kgsl_driver.stats.coherent -= KGSL_PAGETABLE_SIZE;

	kfree(gpummu_pt->tlbflushfilter.base);

	kfree(gpummu_pt);
}

static inline uint32_t
kgsl_pt_entry_get(unsigned int va_base, uint32_t va)
{
	return (va - va_base) >> PAGE_SHIFT;
}

static inline void
kgsl_pt_map_set(struct kgsl_gpummu_pt *pt, uint32_t pte, uint32_t val)
{
	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
	BUG_ON(pte * sizeof(uint32_t) >= pt->base.size);
	baseptr[pte] = val;
}

static inline uint32_t
kgsl_pt_map_get(struct kgsl_gpummu_pt *pt, uint32_t pte)
{
	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
	BUG_ON(pte * sizeof(uint32_t) >= pt->base.size);
	return baseptr[pte] & GSL_PT_PAGE_ADDR_MASK;
}

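/*
 * Worked example (illustrative, assuming 4 KB pages, PAGE_SHIFT == 12):
 * for a GPU address va == KGSL_PAGETABLE_BASE + 0x5000,
 * kgsl_pt_entry_get() returns 0x5000 >> 12 = 5, and
 * kgsl_pt_map_set(pt, 5, paddr | flags) writes 32-bit PTE index 5 of
 * the flat, single-level table.
 */
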
static void kgsl_gpummu_pagefault(struct kgsl_mmu *mmu)
{
	unsigned int reg;
	unsigned int ptbase;

	kgsl_regread(mmu->device, MH_MMU_PAGE_FAULT, &reg);
	kgsl_regread(mmu->device, MH_MMU_PT_BASE, &ptbase);

	KGSL_MEM_CRIT(mmu->device,
		      "mmu page fault: page=0x%lx pt=%d op=%s axi=%d\n",
		      reg & ~(PAGE_SIZE - 1),
		      kgsl_mmu_get_ptname_from_ptbase(ptbase),
		      reg & 0x02 ? "WRITE" : "READ", (reg >> 4) & 0xF);
	trace_kgsl_mmu_pagefault(mmu->device, reg & ~(PAGE_SIZE - 1),
				 kgsl_mmu_get_ptname_from_ptbase(ptbase),
				 reg & 0x02 ? "WRITE" : "READ");
}

static void *kgsl_gpummu_create_pagetable(void)
{
	struct kgsl_gpummu_pt *gpummu_pt;

	gpummu_pt = kzalloc(sizeof(struct kgsl_gpummu_pt),
			    GFP_KERNEL);
	if (!gpummu_pt)
		return NULL;

	gpummu_pt->last_superpte = 0;

	gpummu_pt->tlbflushfilter.size = (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE /
				(PAGE_SIZE * GSL_PT_SUPER_PTE * 8)) + 1;
	gpummu_pt->tlbflushfilter.base = (unsigned int *)
			kzalloc(gpummu_pt->tlbflushfilter.size, GFP_KERNEL);
	if (!gpummu_pt->tlbflushfilter.base) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
			      gpummu_pt->tlbflushfilter.size);
		goto err_free_gpummu;
	}
	GSL_TLBFLUSH_FILTER_RESET();

	gpummu_pt->base.hostptr = kgsl_ptpool_alloc((struct kgsl_ptpool *)
						kgsl_driver.ptpool,
						&gpummu_pt->base.physaddr);

	if (gpummu_pt->base.hostptr == NULL)
		goto err_flushfilter;

	/* ptpool allocations are from coherent memory, so update the
	   device statistics accordingly */

	KGSL_STATS_ADD(KGSL_PAGETABLE_SIZE, kgsl_driver.stats.coherent,
		       kgsl_driver.stats.coherent_max);

	gpummu_pt->base.gpuaddr = gpummu_pt->base.physaddr;
	gpummu_pt->base.size = KGSL_PAGETABLE_SIZE;

	return (void *)gpummu_pt;

err_flushfilter:
	kfree(gpummu_pt->tlbflushfilter.base);
err_free_gpummu:
	kfree(gpummu_pt);

	return NULL;
}

static void kgsl_gpummu_default_setstate(struct kgsl_mmu *mmu,
					 uint32_t flags)
{
	struct kgsl_gpummu_pt *gpummu_pt;
	if (!kgsl_mmu_enabled())
		return;

	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
		kgsl_idle(mmu->device, KGSL_TIMEOUT_DEFAULT);
		gpummu_pt = mmu->hwpagetable->priv;
		kgsl_regwrite(mmu->device, MH_MMU_PT_BASE,
			      gpummu_pt->base.gpuaddr);
	}

	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
		/* Invalidate all and tc */
		kgsl_regwrite(mmu->device, MH_MMU_INVALIDATE, 0x00000003);
	}
}

static void kgsl_gpummu_setstate(struct kgsl_mmu *mmu,
				 struct kgsl_pagetable *pagetable)
{
	if (mmu->flags & KGSL_FLAGS_STARTED) {
		/* If the page table isn't current, set up the mmu to
		 * use the newly specified page table
		 */
		if (mmu->hwpagetable != pagetable) {
			mmu->hwpagetable = pagetable;
			/* Since we do a TLB flush the tlb_flags should
			 * be cleared by calling kgsl_mmu_pt_get_flags
			 */
			kgsl_mmu_pt_get_flags(pagetable, mmu->device->id);

			/* call device specific set page table */
			kgsl_setstate(mmu, KGSL_MMUFLAGS_TLBFLUSH |
				      KGSL_MMUFLAGS_PTUPDATE);
		}
	}
}

static int kgsl_gpummu_init(struct kgsl_mmu *mmu)
{
	/*
	 * initialize device mmu
	 *
	 * call this with the global lock held
	 */
	int status = 0;

	/* sub-client MMU lookups require address translation */
	if ((mmu->config & ~0x1) > 0) {
		/* make sure virtual address range is a multiple of 64Kb */
		if (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE & ((1 << 16) - 1)) {
			KGSL_CORE_ERR("Invalid pagetable size requested "
				      "for GPUMMU: %x\n",
				      CONFIG_MSM_KGSL_PAGE_TABLE_SIZE);
			return -EINVAL;
		}
	}

	dev_info(mmu->device->dev, "|%s| MMU type set for device is GPUMMU\n",
		 __func__);
	return status;
}

static int kgsl_gpummu_start(struct kgsl_mmu *mmu)
{
	/*
	 * initialize device mmu
	 *
	 * call this with the global lock held
	 */

	struct kgsl_device *device = mmu->device;
	struct kgsl_gpummu_pt *gpummu_pt;

	if (mmu->flags & KGSL_FLAGS_STARTED)
		return 0;

	/* MMU not enabled */
	if ((mmu->config & 0x1) == 0)
		return 0;

	/* setup MMU and sub-client behavior */
	kgsl_regwrite(device, MH_MMU_CONFIG, mmu->config);

	/* idle device */
	kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);

	/* enable axi interrupts */
	kgsl_regwrite(device, MH_INTERRUPT_MASK,
		      GSL_MMU_INT_MASK | MH_INTERRUPT_MASK__MMU_PAGE_FAULT);

	kgsl_sharedmem_set(&mmu->setstate_memory, 0, 0,
			   mmu->setstate_memory.size);

	/* TRAN_ERROR needs a 32 byte (32 byte aligned) chunk of memory
	 * to complete transactions in case of an MMU fault. Note that
	 * we'll leave the bottom 32 bytes of the setstate_memory for other
	 * purposes (e.g. use it when dummy read cycles are needed
	 * for other blocks) */
	kgsl_regwrite(device, MH_MMU_TRAN_ERROR,
		      mmu->setstate_memory.physaddr + 32);

	if (mmu->defaultpagetable == NULL)
		mmu->defaultpagetable =
			kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);

	/* Return error if the default pagetable doesn't exist */
	if (mmu->defaultpagetable == NULL)
		return -ENOMEM;

	mmu->hwpagetable = mmu->defaultpagetable;
	gpummu_pt = mmu->hwpagetable->priv;
	kgsl_regwrite(mmu->device, MH_MMU_PT_BASE,
		      gpummu_pt->base.gpuaddr);
	kgsl_regwrite(mmu->device, MH_MMU_VA_RANGE,
		      (KGSL_PAGETABLE_BASE |
		       (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE >> 16)));
	kgsl_setstate(mmu, KGSL_MMUFLAGS_TLBFLUSH);
	mmu->flags |= KGSL_FLAGS_STARTED;

	return 0;
}

static int
kgsl_gpummu_unmap(void *mmu_specific_pt,
		  struct kgsl_memdesc *memdesc)
{
	unsigned int numpages;
	unsigned int pte, ptefirst, ptelast, superpte;
	unsigned int range = kgsl_sg_size(memdesc->sg, memdesc->sglen);
	struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt;

	/* All GPU addresses are assigned page aligned, but some
	   functions perturb the gpuaddr with an offset, so apply the
	   mask here to make sure we have the right address */

	unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;

	numpages = (range >> PAGE_SHIFT);
	if (range & (PAGE_SIZE - 1))
		numpages++;

	ptefirst = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, gpuaddr);
	ptelast = ptefirst + numpages;

	superpte = ptefirst - (ptefirst & (GSL_PT_SUPER_PTE - 1));
	GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / GSL_PT_SUPER_PTE);
	for (pte = ptefirst; pte < ptelast; pte++) {
#ifdef VERBOSE_DEBUG
		/* check if PTE exists */
		if (!kgsl_pt_map_get(gpummu_pt, pte))
			KGSL_CORE_ERR("pt entry %x is already "
				      "unmapped for pagetable %p\n",
				      pte, gpummu_pt);
#endif
		kgsl_pt_map_set(gpummu_pt, pte, GSL_PT_PAGE_DIRTY);
		superpte = pte - (pte & (GSL_PT_SUPER_PTE - 1));
		if (pte == superpte)
			GSL_TLBFLUSH_FILTER_SETDIRTY(superpte /
				GSL_PT_SUPER_PTE);
	}

	/* Post all writes to the pagetable */
	wmb();

	return 0;
}

#define SUPERPTE_IS_DIRTY(_p) \
	(((_p) & (GSL_PT_SUPER_PTE - 1)) == 0 && \
	GSL_TLBFLUSH_FILTER_ISDIRTY((_p) / GSL_PT_SUPER_PTE))

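/*
 * Illustrative reading of the macro above (assuming GSL_PT_SUPER_PTE
 * is 8): PTEs are tracked in blocks of 8 ("superptes") with one dirty
 * bit per block in the flush filter. SUPERPTE_IS_DIRTY(16) checks
 * filter bit 16 / 8 = 2 because 16 is the first PTE of its block; for
 * PTE 17 the macro short-circuits to false so each block is tested
 * only once per walk.
 */
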
static int
kgsl_gpummu_map(void *mmu_specific_pt,
		struct kgsl_memdesc *memdesc,
		unsigned int protflags,
		unsigned int *tlb_flags)
{
	unsigned int pte;
	struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt;
	struct scatterlist *s;
	int flushtlb = 0;
	int i;

	pte = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, memdesc->gpuaddr);

	/* Flush the TLB if the first PTE isn't at the superpte boundary */
	if (pte & (GSL_PT_SUPER_PTE - 1))
		flushtlb = 1;

	for_each_sg(memdesc->sg, s, memdesc->sglen, i) {
		unsigned int paddr = kgsl_get_sg_pa(s);
		unsigned int j;

		/* Each sg entry might be multiple pages long */
		for (j = paddr; j < paddr + s->length; pte++, j += PAGE_SIZE) {
			if (SUPERPTE_IS_DIRTY(pte))
				flushtlb = 1;
			kgsl_pt_map_set(gpummu_pt, pte, j | protflags);
		}
	}

	/* Flush the TLB if the last PTE isn't at the superpte boundary */
	if ((pte + 1) & (GSL_PT_SUPER_PTE - 1))
		flushtlb = 1;

	wmb();

	if (flushtlb) {
		/* set all devices as needing flushing */
		*tlb_flags = UINT_MAX;
		GSL_TLBFLUSH_FILTER_RESET();
	}

	return 0;
}

static void kgsl_gpummu_stop(struct kgsl_mmu *mmu)
{
	kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000000);
	mmu->flags &= ~KGSL_FLAGS_STARTED;
}

static int kgsl_gpummu_close(struct kgsl_mmu *mmu)
{
	/*
	 * close device mmu
	 *
	 * call this with the global lock held
	 */
	if (mmu->setstate_memory.gpuaddr)
		kgsl_sharedmem_free(&mmu->setstate_memory);

	if (mmu->defaultpagetable)
		kgsl_mmu_putpagetable(mmu->defaultpagetable);

	return 0;
}

static unsigned int
kgsl_gpummu_get_current_ptbase(struct kgsl_mmu *mmu)
{
	unsigned int ptbase;
	kgsl_regread(mmu->device, MH_MMU_PT_BASE, &ptbase);
	return ptbase;
}

static unsigned int
kgsl_gpummu_pt_get_base_addr(struct kgsl_pagetable *pt)
{
	struct kgsl_gpummu_pt *gpummu_pt = pt->priv;
	return gpummu_pt->base.gpuaddr;
}

struct kgsl_mmu_ops gpummu_ops = {
	.mmu_init = kgsl_gpummu_init,
	.mmu_close = kgsl_gpummu_close,
	.mmu_start = kgsl_gpummu_start,
	.mmu_stop = kgsl_gpummu_stop,
	.mmu_setstate = kgsl_gpummu_setstate,
	.mmu_device_setstate = kgsl_gpummu_default_setstate,
	.mmu_pagefault = kgsl_gpummu_pagefault,
	.mmu_get_current_ptbase = kgsl_gpummu_get_current_ptbase,
	.mmu_enable_clk = NULL,
	.mmu_disable_clk = NULL,
	.mmu_get_hwpagetable_asid = NULL,
	.mmu_get_pt_lsb = NULL,
	.mmu_get_reg_map_desc = NULL,
};

struct kgsl_mmu_pt_ops gpummu_pt_ops = {
	.mmu_map = kgsl_gpummu_map,
	.mmu_unmap = kgsl_gpummu_unmap,
	.mmu_create_pagetable = kgsl_gpummu_create_pagetable,
	.mmu_destroy_pagetable = kgsl_gpummu_destroy_pagetable,
	.mmu_pt_equal = kgsl_gpummu_pt_equal,
	.mmu_pt_get_base_addr = kgsl_gpummu_pt_get_base_addr,
};