/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "kgsl.h"
#include "kgsl_mmu.h"
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"
#include "kgsl_trace.h"

#define KGSL_PAGETABLE_SIZE \
	ALIGN(KGSL_PAGETABLE_ENTRIES(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE) * \
	KGSL_PAGETABLE_ENTRY_SIZE, PAGE_SIZE)
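
/*
 * Worked example (illustrative only, assuming 4 KB pages and 4-byte
 * entries; the real values come from KGSL_PAGETABLE_ENTRIES() and
 * KGSL_PAGETABLE_ENTRY_SIZE): a CONFIG_MSM_KGSL_PAGE_TABLE_SIZE of
 * 32 MB maps 8192 pages, so one pagetable is 8192 * 4 = 32 KB,
 * rounded up to a PAGE_SIZE multiple by ALIGN().
 */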

static ssize_t
sysfs_show_ptpool_entries(struct kobject *kobj,
			  struct kobj_attribute *attr,
			  char *buf)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
		kgsl_driver.ptpool;
	return snprintf(buf, PAGE_SIZE, "%d\n", pool->entries);
}

static ssize_t
sysfs_show_ptpool_min(struct kobject *kobj,
		      struct kobj_attribute *attr,
		      char *buf)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
		kgsl_driver.ptpool;
	return snprintf(buf, PAGE_SIZE, "%d\n",
			pool->static_entries);
}

static ssize_t
sysfs_show_ptpool_chunks(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 char *buf)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
		kgsl_driver.ptpool;
	return snprintf(buf, PAGE_SIZE, "%d\n", pool->chunks);
}

static ssize_t
sysfs_show_ptpool_ptsize(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 char *buf)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
		kgsl_driver.ptpool;
	return snprintf(buf, PAGE_SIZE, "%d\n", pool->ptsize);
}

static struct kobj_attribute attr_ptpool_entries = {
	.attr = { .name = "ptpool_entries", .mode = 0444 },
	.show = sysfs_show_ptpool_entries,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_min = {
	.attr = { .name = "ptpool_min", .mode = 0444 },
	.show = sysfs_show_ptpool_min,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_chunks = {
	.attr = { .name = "ptpool_chunks", .mode = 0444 },
	.show = sysfs_show_ptpool_chunks,
	.store = NULL,
};

static struct kobj_attribute attr_ptpool_ptsize = {
	.attr = { .name = "ptpool_ptsize", .mode = 0444 },
	.show = sysfs_show_ptpool_ptsize,
	.store = NULL,
};

static struct attribute *ptpool_attrs[] = {
	&attr_ptpool_entries.attr,
	&attr_ptpool_min.attr,
	&attr_ptpool_chunks.attr,
	&attr_ptpool_ptsize.attr,
	NULL,
};

static struct attribute_group ptpool_attr_group = {
	.attrs = ptpool_attrs,
};

static int
_kgsl_ptpool_add_entries(struct kgsl_ptpool *pool, int count, int dynamic)
{
	struct kgsl_ptpool_chunk *chunk;
	size_t size = ALIGN(count * pool->ptsize, PAGE_SIZE);

	BUG_ON(count == 0);

	if (get_order(size) >= MAX_ORDER) {
		KGSL_CORE_ERR("ptpool allocation is too big: %zu\n", size);
		return -EINVAL;
	}

	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (chunk == NULL) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n", sizeof(*chunk));
		return -ENOMEM;
	}

	chunk->size = size;
	chunk->count = count;
	chunk->dynamic = dynamic;

	chunk->data = dma_alloc_coherent(NULL, size,
					 &chunk->phys, GFP_KERNEL);

	if (chunk->data == NULL) {
		KGSL_CORE_ERR("dma_alloc_coherent(%zu) failed\n", size);
		goto err;
	}

	chunk->bitmap = kzalloc(BITS_TO_LONGS(count) * sizeof(long),
				GFP_KERNEL);

	if (chunk->bitmap == NULL) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n",
			BITS_TO_LONGS(count) * sizeof(long));
		goto err_dma;
	}

	list_add_tail(&chunk->list, &pool->list);

	pool->chunks++;
	pool->entries += count;

	if (!dynamic)
		pool->static_entries += count;

	return 0;

err_dma:
	dma_free_coherent(NULL, chunk->size, chunk->data, chunk->phys);
err:
	kfree(chunk);
	return -ENOMEM;
}

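/*
 * Find a free slot with a first-fit scan over the chunk list. Each
 * chunk is one contiguous dma_alloc_coherent() allocation with a
 * bitmap of in-use slots, so a free slot index converts directly to
 * both addresses (sketch of the arithmetic used below):
 *
 *	virt = chunk->data + bit * pool->ptsize;
 *	phys = chunk->phys + bit * pool->ptsize;
 */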
static void *
_kgsl_ptpool_get_entry(struct kgsl_ptpool *pool, unsigned int *physaddr)
{
	struct kgsl_ptpool_chunk *chunk;

	list_for_each_entry(chunk, &pool->list, list) {
		int bit = find_first_zero_bit(chunk->bitmap, chunk->count);

		if (bit >= chunk->count)
			continue;

		set_bit(bit, chunk->bitmap);
		*physaddr = chunk->phys + (bit * pool->ptsize);

		return chunk->data + (bit * pool->ptsize);
	}

	return NULL;
}

/**
 * kgsl_ptpool_add
 * @pool: A pointer to a ptpool structure
 * @count: Number of entries to add
 *
 * Add static entries to the pagetable pool.
 */

static int
kgsl_ptpool_add(struct kgsl_ptpool *pool, int count)
{
	int ret = 0;
	BUG_ON(count == 0);

	mutex_lock(&pool->lock);

	/* Only 4MB can be allocated in one chunk, so larger allocations
	   need to be split into multiple sections */

	while (count) {
		int entries = ((count * pool->ptsize) > SZ_4M) ?
			SZ_4M / pool->ptsize : count;

		/* Add the entries as static, i.e. they don't ever stand
		   a chance of being removed */

		ret = _kgsl_ptpool_add_entries(pool, entries, 0);
		if (ret)
			break;

		count -= entries;
	}

	mutex_unlock(&pool->lock);
	return ret;
}

/**
 * kgsl_ptpool_alloc
 * @pool: A pointer to a ptpool structure
 * @physaddr: A pointer to store the physical address of the pagetable
 *
 * Allocate a pagetable from the pool. Returns the virtual address
 * of the pagetable; the physical address is returned in physaddr.
 */

static void *kgsl_ptpool_alloc(struct kgsl_ptpool *pool,
			       unsigned int *physaddr)
{
	void *addr = NULL;
	int ret;

	mutex_lock(&pool->lock);
	addr = _kgsl_ptpool_get_entry(pool, physaddr);
	if (addr)
		goto done;

	/* Add a chunk for 1 more pagetable and mark it as dynamic */
	ret = _kgsl_ptpool_add_entries(pool, 1, 1);

	if (ret)
		goto done;

	addr = _kgsl_ptpool_get_entry(pool, physaddr);
done:
	mutex_unlock(&pool->lock);
	return addr;
}
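
/*
 * Usage sketch (mirrors how kgsl_gpummu_create_pagetable() and
 * kgsl_gpummu_destroy_pagetable() below use the pool; variable names
 * here are illustrative):
 *
 *	unsigned int physaddr;
 *	void *pt = kgsl_ptpool_alloc(pool, &physaddr);
 *	if (pt != NULL) {
 *		...
 *		kgsl_ptpool_free(pool, pt);
 *	}
 *
 * If every chunk is full, a single-entry dynamic chunk is added and
 * the scan is retried once.
 */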

static inline void _kgsl_ptpool_rm_chunk(struct kgsl_ptpool_chunk *chunk)
{
	list_del(&chunk->list);

	if (chunk->data)
		dma_free_coherent(NULL, chunk->size, chunk->data,
			chunk->phys);
	kfree(chunk->bitmap);
	kfree(chunk);
}

/**
 * kgsl_ptpool_free
 * @pool: A pointer to a ptpool structure
 * @addr: The virtual address to free
 *
 * Free a pagetable allocated from the pool.
 */

static void kgsl_ptpool_free(struct kgsl_ptpool *pool, void *addr)
{
	struct kgsl_ptpool_chunk *chunk, *tmp;

	if (pool == NULL || addr == NULL)
		return;

	mutex_lock(&pool->lock);
	list_for_each_entry_safe(chunk, tmp, &pool->list, list) {
		if (addr >= chunk->data &&
		    addr < chunk->data + chunk->size) {
			int bit = ((unsigned long) (addr - chunk->data)) /
				pool->ptsize;

			clear_bit(bit, chunk->bitmap);
			memset(addr, 0, pool->ptsize);

			if (chunk->dynamic &&
			    bitmap_empty(chunk->bitmap, chunk->count))
				_kgsl_ptpool_rm_chunk(chunk);

			break;
		}
	}

	mutex_unlock(&pool->lock);
}

void kgsl_gpummu_ptpool_destroy(void *ptpool)
{
	struct kgsl_ptpool *pool = (struct kgsl_ptpool *)ptpool;
	struct kgsl_ptpool_chunk *chunk, *tmp;

	if (pool == NULL)
		return;

	mutex_lock(&pool->lock);
	list_for_each_entry_safe(chunk, tmp, &pool->list, list)
		_kgsl_ptpool_rm_chunk(chunk);
	mutex_unlock(&pool->lock);

	kfree(pool);
}

/**
 * kgsl_gpummu_ptpool_init
 * @entries: The number of initial entries to add to the pool
 *
 * Allocate a pool, initialize it, and add an initial chunk of entries.
 */
void *kgsl_gpummu_ptpool_init(int entries)
{
	int ptsize = KGSL_PAGETABLE_SIZE;
	struct kgsl_ptpool *pool;
	int ret = 0;

	pool = kzalloc(sizeof(struct kgsl_ptpool), GFP_KERNEL);
	if (!pool) {
		KGSL_CORE_ERR("Failed to allocate memory "
				"for ptpool\n");
		return NULL;
	}

	pool->ptsize = ptsize;
	mutex_init(&pool->lock);
	INIT_LIST_HEAD(&pool->list);

	if (entries) {
		ret = kgsl_ptpool_add(pool, entries);
		if (ret)
			goto err_ptpool_remove;
	}

	ret = sysfs_create_group(kgsl_driver.ptkobj, &ptpool_attr_group);
	if (ret) {
		KGSL_CORE_ERR("sysfs_create_group failed for ptpool "
				"statistics: %d\n", ret);
		goto err_ptpool_remove;
	}
	return (void *)pool;

err_ptpool_remove:
	kgsl_gpummu_ptpool_destroy(pool);
	return NULL;
}

int kgsl_gpummu_pt_equal(struct kgsl_pagetable *pt,
			unsigned int pt_base)
{
	struct kgsl_gpummu_pt *gpummu_pt = pt ? pt->priv : NULL;
	return gpummu_pt && pt_base && (gpummu_pt->base.gpuaddr == pt_base);
}

void kgsl_gpummu_destroy_pagetable(void *mmu_specific_pt)
{
	struct kgsl_gpummu_pt *gpummu_pt = (struct kgsl_gpummu_pt *)
		mmu_specific_pt;
	kgsl_ptpool_free((struct kgsl_ptpool *)kgsl_driver.ptpool,
			 gpummu_pt->base.hostptr);

	kgsl_driver.stats.coherent -= KGSL_PAGETABLE_SIZE;

	kfree(gpummu_pt->tlbflushfilter.base);

	kfree(gpummu_pt);
}

static inline uint32_t
kgsl_pt_entry_get(unsigned int va_base, uint32_t va)
{
	return (va - va_base) >> PAGE_SHIFT;
}
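
/*
 * Example (illustrative numbers): with va_base == KGSL_PAGETABLE_BASE
 * and va one page above it, (va - va_base) >> PAGE_SHIFT yields PTE
 * index 1; each PTE in the linear pagetable covers one page of GPU
 * virtual address space.
 */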

static inline void
kgsl_pt_map_set(struct kgsl_gpummu_pt *pt, uint32_t pte, uint32_t val)
{
	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
	BUG_ON(pte * sizeof(uint32_t) >= pt->base.size);
	baseptr[pte] = val;
}

static inline uint32_t
kgsl_pt_map_get(struct kgsl_gpummu_pt *pt, uint32_t pte)
{
	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
	BUG_ON(pte * sizeof(uint32_t) >= pt->base.size);
	return baseptr[pte] & GSL_PT_PAGE_ADDR_MASK;
}

static void kgsl_gpummu_pagefault(struct kgsl_mmu *mmu)
{
	unsigned int reg;
	unsigned int ptbase;

	kgsl_regread(mmu->device, MH_MMU_PAGE_FAULT, &reg);
	kgsl_regread(mmu->device, MH_MMU_PT_BASE, &ptbase);

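	/*
	 * Decode of MH_MMU_PAGE_FAULT as used below (field layout
	 * inferred from this code, not from a hardware manual): the
	 * upper bits hold the page-aligned fault address, bit 1
	 * distinguishes WRITE from READ, and bits 4-7 carry the AXI
	 * id of the faulting master.
	 */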
	KGSL_MEM_CRIT(mmu->device,
			"mmu page fault: page=0x%lx pt=%d op=%s axi=%d\n",
			reg & ~(PAGE_SIZE - 1),
			kgsl_mmu_get_ptname_from_ptbase(ptbase),
			reg & 0x02 ? "WRITE" : "READ", (reg >> 4) & 0xF);
	trace_kgsl_mmu_pagefault(mmu->device, reg & ~(PAGE_SIZE - 1),
			kgsl_mmu_get_ptname_from_ptbase(ptbase),
			reg & 0x02 ? "WRITE" : "READ");
}

static void *kgsl_gpummu_create_pagetable(void)
{
	struct kgsl_gpummu_pt *gpummu_pt;

	gpummu_pt = kzalloc(sizeof(struct kgsl_gpummu_pt),
		GFP_KERNEL);
	if (!gpummu_pt)
		return NULL;

	gpummu_pt->last_superpte = 0;

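	/*
	 * The TLB flush filter holds one dirty bit per group of
	 * GSL_PT_SUPER_PTE consecutive PTEs: bytes needed = number of
	 * PTEs / (GSL_PT_SUPER_PTE * 8), and the "+ 1" below rounds up
	 * for any partial group.
	 */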
	gpummu_pt->tlbflushfilter.size = (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE /
				(PAGE_SIZE * GSL_PT_SUPER_PTE * 8)) + 1;
	gpummu_pt->tlbflushfilter.base = (unsigned int *)
		kzalloc(gpummu_pt->tlbflushfilter.size, GFP_KERNEL);
	if (!gpummu_pt->tlbflushfilter.base) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
			gpummu_pt->tlbflushfilter.size);
		goto err_free_gpummu;
	}
	GSL_TLBFLUSH_FILTER_RESET();

	gpummu_pt->base.hostptr = kgsl_ptpool_alloc((struct kgsl_ptpool *)
		kgsl_driver.ptpool,
		&gpummu_pt->base.physaddr);

	if (gpummu_pt->base.hostptr == NULL)
		goto err_flushfilter;

	/* ptpool allocations are from coherent memory, so update the
	   device statistics accordingly */

	KGSL_STATS_ADD(KGSL_PAGETABLE_SIZE, kgsl_driver.stats.coherent,
		       kgsl_driver.stats.coherent_max);

	gpummu_pt->base.gpuaddr = gpummu_pt->base.physaddr;
	gpummu_pt->base.size = KGSL_PAGETABLE_SIZE;

	return (void *)gpummu_pt;

err_flushfilter:
	kfree(gpummu_pt->tlbflushfilter.base);
err_free_gpummu:
	kfree(gpummu_pt);

	return NULL;
}

static void kgsl_gpummu_default_setstate(struct kgsl_mmu *mmu,
					uint32_t flags)
{
	struct kgsl_gpummu_pt *gpummu_pt;
	if (!kgsl_mmu_enabled())
		return;

	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
		kgsl_idle(mmu->device);
		gpummu_pt = mmu->hwpagetable->priv;
		kgsl_regwrite(mmu->device, MH_MMU_PT_BASE,
			gpummu_pt->base.gpuaddr);
	}

	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
		/* Invalidate all and tc */
		kgsl_regwrite(mmu->device, MH_MMU_INVALIDATE, 0x00000003);
	}
}

static void kgsl_gpummu_setstate(struct kgsl_mmu *mmu,
				struct kgsl_pagetable *pagetable,
				unsigned int context_id)
{
	if (mmu->flags & KGSL_FLAGS_STARTED) {
		/* If the page table is not current, set up the MMU to
		 * use the newly specified page table
		 */
		if (mmu->hwpagetable != pagetable) {
			mmu->hwpagetable = pagetable;
			/* Since we do a TLB flush the tlb_flags should
			 * be cleared by calling kgsl_mmu_pt_get_flags
			 */
			kgsl_mmu_pt_get_flags(pagetable, mmu->device->id);

			/* call device specific set page table */
			kgsl_setstate(mmu, context_id, KGSL_MMUFLAGS_TLBFLUSH |
				KGSL_MMUFLAGS_PTUPDATE);
		}
	}
}

static int kgsl_gpummu_init(struct kgsl_mmu *mmu)
{
	/*
	 * initialize device mmu
	 *
	 * call this with the global lock held
	 */
	int status = 0;

	/* sub-client MMU lookups require address translation */
	if ((mmu->config & ~0x1) > 0) {
		/* make sure virtual address range is a multiple of 64Kb */
		if (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE & ((1 << 16) - 1)) {
			KGSL_CORE_ERR("Invalid pagetable size requested "
					"for GPUMMU: %x\n",
					CONFIG_MSM_KGSL_PAGE_TABLE_SIZE);
			return -EINVAL;
		}
	}

	dev_info(mmu->device->dev, "|%s| MMU type set for device is GPUMMU\n",
		__func__);
	return status;
}

static int kgsl_gpummu_start(struct kgsl_mmu *mmu)
{
	/*
	 * initialize device mmu
	 *
	 * call this with the global lock held
	 */

	struct kgsl_device *device = mmu->device;
	struct kgsl_gpummu_pt *gpummu_pt;

	if (mmu->flags & KGSL_FLAGS_STARTED)
		return 0;

	/* MMU not enabled */
	if ((mmu->config & 0x1) == 0)
		return 0;

	/* setup MMU and sub-client behavior */
	kgsl_regwrite(device, MH_MMU_CONFIG, mmu->config);

	/* idle device */
	kgsl_idle(device);

	/* enable axi interrupts */
	kgsl_regwrite(device, MH_INTERRUPT_MASK,
			GSL_MMU_INT_MASK | MH_INTERRUPT_MASK__MMU_PAGE_FAULT);

	kgsl_sharedmem_set(&mmu->setstate_memory, 0, 0,
			   mmu->setstate_memory.size);

	/* TRAN_ERROR needs a 32 byte (32 byte aligned) chunk of memory
	 * to complete transactions in case of an MMU fault. Note that
	 * we'll leave the bottom 32 bytes of the setstate_memory for other
	 * purposes (e.g. use it when dummy read cycles are needed
	 * for other blocks) */
	kgsl_regwrite(device, MH_MMU_TRAN_ERROR,
		mmu->setstate_memory.physaddr + 32);

	if (mmu->defaultpagetable == NULL)
		mmu->defaultpagetable =
			kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);

	/* Return error if the default pagetable doesn't exist */
	if (mmu->defaultpagetable == NULL)
		return -ENOMEM;

	mmu->hwpagetable = mmu->defaultpagetable;
	gpummu_pt = mmu->hwpagetable->priv;
	kgsl_regwrite(mmu->device, MH_MMU_PT_BASE,
			gpummu_pt->base.gpuaddr);
	kgsl_regwrite(mmu->device, MH_MMU_VA_RANGE,
			(KGSL_PAGETABLE_BASE |
			(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE >> 16)));
	kgsl_setstate(mmu, KGSL_MEMSTORE_GLOBAL, KGSL_MMUFLAGS_TLBFLUSH);
	mmu->flags |= KGSL_FLAGS_STARTED;

	return 0;
}

static int
kgsl_gpummu_unmap(void *mmu_specific_pt,
		struct kgsl_memdesc *memdesc,
		unsigned int *tlb_flags)
{
	unsigned int numpages;
	unsigned int pte, ptefirst, ptelast, superpte;
	unsigned int range = kgsl_sg_size(memdesc->sg, memdesc->sglen);
	struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt;

	/* All GPU addresses as assigned are page aligned, but some
	   functions perturb the gpuaddr with an offset, so apply the
	   mask here to make sure we have the right address */

	unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;

	numpages = (range >> PAGE_SHIFT);
	if (range & (PAGE_SIZE - 1))
		numpages++;

	ptefirst = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, gpuaddr);
	ptelast = ptefirst + numpages;

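	/*
	 * Instead of flushing the TLB on every unmap, mark each touched
	 * superpte group dirty in the flush filter; the next map that
	 * lands on a dirty group (see SUPERPTE_IS_DIRTY below) forces a
	 * full TLB flush.
	 */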
	superpte = ptefirst - (ptefirst & (GSL_PT_SUPER_PTE-1));
	GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / GSL_PT_SUPER_PTE);
	for (pte = ptefirst; pte < ptelast; pte++) {
#ifdef VERBOSE_DEBUG
		/* check if PTE exists */
		if (!kgsl_pt_map_get(gpummu_pt, pte))
			KGSL_CORE_ERR("pt entry %x is already "
				"unmapped for pagetable %p\n", pte, gpummu_pt);
#endif
		kgsl_pt_map_set(gpummu_pt, pte, GSL_PT_PAGE_DIRTY);
		superpte = pte - (pte & (GSL_PT_SUPER_PTE - 1));
		if (pte == superpte)
			GSL_TLBFLUSH_FILTER_SETDIRTY(superpte /
				GSL_PT_SUPER_PTE);
	}

	/* Post all writes to the pagetable */
	wmb();

	return 0;
}

#define SUPERPTE_IS_DIRTY(_p) \
(((_p) & (GSL_PT_SUPER_PTE - 1)) == 0 && \
GSL_TLBFLUSH_FILTER_ISDIRTY((_p) / GSL_PT_SUPER_PTE))
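
/*
 * SUPERPTE_IS_DIRTY(_p) is true only when _p is the first PTE of a
 * superpte group and that group is marked dirty in the flush filter,
 * so the map loop below consults the filter once per group rather
 * than once per PTE.
 */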

static int
kgsl_gpummu_map(void *mmu_specific_pt,
		struct kgsl_memdesc *memdesc,
		unsigned int protflags,
		unsigned int *tlb_flags)
{
	unsigned int pte;
	struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt;
	struct scatterlist *s;
	int flushtlb = 0;
	int i;

	pte = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, memdesc->gpuaddr);

	/* Flush the TLB if the first PTE isn't at the superpte boundary */
	if (pte & (GSL_PT_SUPER_PTE - 1))
		flushtlb = 1;

	for_each_sg(memdesc->sg, s, memdesc->sglen, i) {
		unsigned int paddr = kgsl_get_sg_pa(s);
		unsigned int j;

		/* Each sg entry might be multiple pages long */
		for (j = paddr; j < paddr + s->length; pte++, j += PAGE_SIZE) {
			if (SUPERPTE_IS_DIRTY(pte))
				flushtlb = 1;
			kgsl_pt_map_set(gpummu_pt, pte, j | protflags);
		}
	}

	/* Flush the TLB if the last PTE isn't at the superpte boundary */
	if ((pte + 1) & (GSL_PT_SUPER_PTE - 1))
		flushtlb = 1;

	wmb();

	if (flushtlb) {
		/* set all devices as needing flushing */
		*tlb_flags = UINT_MAX;
		GSL_TLBFLUSH_FILTER_RESET();
	}

	return 0;
}

static void kgsl_gpummu_stop(struct kgsl_mmu *mmu)
{
	mmu->flags &= ~KGSL_FLAGS_STARTED;
}

static int kgsl_gpummu_close(struct kgsl_mmu *mmu)
{
	/*
	 * close device mmu
	 *
	 * call this with the global lock held
	 */
	if (mmu->setstate_memory.gpuaddr)
		kgsl_sharedmem_free(&mmu->setstate_memory);

	if (mmu->defaultpagetable)
		kgsl_mmu_putpagetable(mmu->defaultpagetable);

	return 0;
}

static unsigned int
kgsl_gpummu_get_current_ptbase(struct kgsl_mmu *mmu)
{
	unsigned int ptbase;
	kgsl_regread(mmu->device, MH_MMU_PT_BASE, &ptbase);
	return ptbase;
}

static unsigned int
kgsl_gpummu_pt_get_base_addr(struct kgsl_pagetable *pt)
{
	struct kgsl_gpummu_pt *gpummu_pt = pt->priv;
	return gpummu_pt->base.gpuaddr;
}

struct kgsl_mmu_ops gpummu_ops = {
	.mmu_init = kgsl_gpummu_init,
	.mmu_close = kgsl_gpummu_close,
	.mmu_start = kgsl_gpummu_start,
	.mmu_stop = kgsl_gpummu_stop,
	.mmu_setstate = kgsl_gpummu_setstate,
	.mmu_device_setstate = kgsl_gpummu_default_setstate,
	.mmu_pagefault = kgsl_gpummu_pagefault,
	.mmu_get_current_ptbase = kgsl_gpummu_get_current_ptbase,
	.mmu_enable_clk = NULL,
	.mmu_disable_clk_on_ts = NULL,
	.mmu_get_pt_lsb = NULL,
	.mmu_get_reg_map_desc = NULL,
};

struct kgsl_mmu_pt_ops gpummu_pt_ops = {
	.mmu_map = kgsl_gpummu_map,
	.mmu_unmap = kgsl_gpummu_unmap,
	.mmu_create_pagetable = kgsl_gpummu_create_pagetable,
	.mmu_destroy_pagetable = kgsl_gpummu_destroy_pagetable,
	.mmu_pt_equal = kgsl_gpummu_pt_equal,
	.mmu_pt_get_base_addr = kgsl_gpummu_pt_get_base_addr,
};