blob: bf330ee021bdca63948f71e329f29ba4f2e2a93d [file] [log] [blame]
Duy Truonge833aca2013-02-12 13:35:08 -08001/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#ifndef __KGSL_MMU_H
14#define __KGSL_MMU_H
15
Jeremy Gebben2aba0f32013-05-28 16:54:00 -060016#include <mach/iommu.h>
17
/*
 * These defines control the address range for allocations that
 * are mapped into all pagetables.
 */
#define KGSL_IOMMU_GLOBAL_MEM_BASE	0xC0000000
#define KGSL_IOMMU_GLOBAL_MEM_SIZE	SZ_4M

/* GPU virtual mappings are aligned to 8K (1 << 13) boundaries */
#define KGSL_MMU_ALIGN_SHIFT	13
#define KGSL_MMU_ALIGN_MASK	(~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))
27
/* Identifier for the global page table */
/* Per process page tables will probably pass in the thread group
   as an identifier */
#define KGSL_MMU_GLOBAL_PT 0
/* Pagetable name reserved for the IOMMU private bank table */
#define KGSL_MMU_PRIV_BANK_TABLE_NAME 0xFFFFFFFF

struct kgsl_device;

/* Number of PTEs grouped into one "super" PTE by the GPU MMU
 * backend -- NOTE(review): semantics live in kgsl_gpummu; confirm there */
#define GSL_PT_SUPER_PTE 8
/* GPU pagetable entry flag bits (write-valid, read-valid, dirty) */
#define GSL_PT_PAGE_WV		0x00000001
#define GSL_PT_PAGE_RV		0x00000002
#define GSL_PT_PAGE_DIRTY	0x00000004
41
/* MMU registers - the register locations for all cores are the
   same. The method for getting to those locations differs between
   2D and 3D, but the 2D and 3D register functions do that magic
   for us */

#define MH_MMU_CONFIG		0x0040
#define MH_MMU_VA_RANGE		0x0041
#define MH_MMU_PT_BASE		0x0042
#define MH_MMU_PAGE_FAULT	0x0043
#define MH_MMU_TRAN_ERROR	0x0044
#define MH_MMU_INVALIDATE	0x0045
#define MH_MMU_MPU_BASE		0x0046
#define MH_MMU_MPU_END		0x0047

#define MH_INTERRUPT_MASK	0x0A42
#define MH_INTERRUPT_STATUS	0x0A43
#define MH_INTERRUPT_CLEAR	0x0A44
#define MH_AXI_ERROR		0x0A45
#define MH_ARBITER_CONFIG	0x0A40
#define MH_DEBUG_CTRL		0x0A4E
#define MH_DEBUG_DATA		0x0A4F
#define MH_AXI_HALT_CONTROL	0x0A50
#define MH_CLNT_INTF_CTRL_CONFIG1	0x0A54
#define MH_CLNT_INTF_CTRL_CONFIG2	0x0A55

/* MH_MMU_CONFIG bit definitions -- per-client behavior field shifts */

#define MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT	0x00000004
#define MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT	0x00000006
#define MH_MMU_CONFIG__CP_R0_CLNT_BEHAVIOR__SHIFT	0x00000008
#define MH_MMU_CONFIG__CP_R1_CLNT_BEHAVIOR__SHIFT	0x0000000a
#define MH_MMU_CONFIG__CP_R2_CLNT_BEHAVIOR__SHIFT	0x0000000c
#define MH_MMU_CONFIG__CP_R3_CLNT_BEHAVIOR__SHIFT	0x0000000e
#define MH_MMU_CONFIG__CP_R4_CLNT_BEHAVIOR__SHIFT	0x00000010
#define MH_MMU_CONFIG__VGT_R0_CLNT_BEHAVIOR__SHIFT	0x00000012
#define MH_MMU_CONFIG__VGT_R1_CLNT_BEHAVIOR__SHIFT	0x00000014
#define MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT	0x00000016
#define MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT	0x00000018

/* MMU Flags */
#define KGSL_MMUFLAGS_TLBFLUSH		0x10000000
#define KGSL_MMUFLAGS_PTUPDATE		0x20000000

/* MH interrupt cause bits */
#define MH_INTERRUPT_MASK__AXI_READ_ERROR	0x00000001L
#define MH_INTERRUPT_MASK__AXI_WRITE_ERROR	0x00000002L
#define MH_INTERRUPT_MASK__MMU_PAGE_FAULT	0x00000004L

/* Full interrupt mask: AXI errors plus MMU page faults */
#define KGSL_MMU_INT_MASK \
	(MH_INTERRUPT_MASK__AXI_READ_ERROR | \
	 MH_INTERRUPT_MASK__AXI_WRITE_ERROR | \
	 MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070093
/* Which MMU backend is in use (selected at runtime) */
enum kgsl_mmutype {
	KGSL_MMU_TYPE_GPU = 0,	/* legacy GPU-internal MMU (gpummu) */
	KGSL_MMU_TYPE_IOMMU,	/* system IOMMU */
	KGSL_MMU_TYPE_NONE	/* no MMU; direct/physical addressing */
};
99
/*
 * struct kgsl_pagetable - a GPU pagetable instance plus its
 * allocator state and usage statistics.
 */
struct kgsl_pagetable {
	spinlock_t lock;		/* NOTE(review): presumably serializes map/unmap on this pagetable -- confirm at call sites */
	struct kref refcount;		/* lifetime; freed on last put */
	unsigned int max_entries;
	struct gen_pool *pool;		/* GPU virtual address allocator */
	struct gen_pool *kgsl_pool;	/* allocator for globally-mapped (kgsl internal) range */
	struct list_head list;		/* link in the global pagetable list */
	unsigned int name;		/* pagetable id, e.g. KGSL_MMU_GLOBAL_PT */
	struct kobject *kobj;		/* sysfs node for the stats below */

	struct {
		unsigned int entries;
		unsigned int mapped;	/* bytes currently mapped -- TODO confirm unit */
		unsigned int max_mapped;	/* high watermark of mapped */
		unsigned int max_entries;	/* high watermark of entries */
	} stats;
	const struct kgsl_mmu_pt_ops *pt_ops;	/* backend map/unmap ops */
	unsigned int tlb_flags;		/* pending TLB flush flags */
	unsigned int fault_addr;	/* last recorded fault address */
	void *priv;			/* backend-private pagetable data */
};
121
struct kgsl_mmu;

/*
 * struct kgsl_mmu_ops - backend (gpummu vs iommu) operations table.
 * Any pointer may be NULL; the inline wrappers below check before
 * calling and substitute a default return value.
 */
struct kgsl_mmu_ops {
	int (*mmu_init) (struct kgsl_mmu *mmu);
	int (*mmu_close) (struct kgsl_mmu *mmu);
	int (*mmu_start) (struct kgsl_mmu *mmu);
	void (*mmu_stop) (struct kgsl_mmu *mmu);
	/* switch the pagetable active on the given context */
	void (*mmu_setstate) (struct kgsl_mmu *mmu,
		struct kgsl_pagetable *pagetable,
		unsigned int context_id);
	/* apply KGSL_MMUFLAGS_* (TLB flush / PT update) to the device */
	void (*mmu_device_setstate) (struct kgsl_mmu *mmu,
					uint32_t flags);
	void (*mmu_pagefault) (struct kgsl_mmu *mmu);
	/* pagetable base currently programmed in hardware */
	unsigned int (*mmu_get_current_ptbase)
			(struct kgsl_mmu *mmu);
	/* release clocks once timestamp ts retires (if ts_valid) */
	void (*mmu_disable_clk_on_ts)
		(struct kgsl_mmu *mmu, uint32_t ts, bool ts_valid);
	int (*mmu_enable_clk)
		(struct kgsl_mmu *mmu, int ctx_id);
	int (*mmu_get_pt_lsb)(struct kgsl_mmu *mmu,
				unsigned int unit_id,
				enum kgsl_iommu_context_id ctx_id);
	/* GPU virtual address of an IOMMU register */
	unsigned int (*mmu_get_reg_gpuaddr)(struct kgsl_mmu *mmu,
			int iommu_unit_num, int ctx_id, int reg);
	int (*mmu_get_num_iommu_units)(struct kgsl_mmu *mmu);
	/* nonzero if pt matches the hardware base address pt_base */
	int (*mmu_pt_equal) (struct kgsl_mmu *mmu,
			struct kgsl_pagetable *pt,
			unsigned int pt_base);
	unsigned int (*mmu_get_pt_base_addr)
			(struct kgsl_mmu *mmu,
			struct kgsl_pagetable *pt);
	/* emit CPU/GPU sync lock/unlock commands into cmds;
	 * returns number of dwords written -- TODO confirm */
	unsigned int (*mmu_sync_lock)
			(struct kgsl_mmu *mmu,
			unsigned int *cmds);
	unsigned int (*mmu_sync_unlock)
			(struct kgsl_mmu *mmu,
			unsigned int *cmds);
	int (*mmu_setup_pt) (struct kgsl_mmu *mmu,
			struct kgsl_pagetable *pt);
	void (*mmu_cleanup_pt) (struct kgsl_mmu *mmu,
			struct kgsl_pagetable *pt);
};
164
/*
 * struct kgsl_mmu_pt_ops - per-pagetable backend operations
 * (mapping/unmapping memory descriptors and pagetable lifetime).
 */
struct kgsl_mmu_pt_ops {
	/* map memdesc with protflags; may set bits in *tlb_flags
	 * to request a deferred TLB flush */
	int (*mmu_map) (void *mmu_pt,
			struct kgsl_memdesc *memdesc,
			unsigned int protflags,
			unsigned int *tlb_flags);
	int (*mmu_unmap) (void *mmu_pt,
			struct kgsl_memdesc *memdesc,
			unsigned int *tlb_flags);
	/* returns backend-private pagetable object, or NULL on failure */
	void *(*mmu_create_pagetable) (void);
	void (*mmu_destroy_pagetable) (void *pt);
};
176
/* mmu->flags bit: CPU/GPU IOMMU register access must be synchronized */
#define KGSL_MMU_FLAGS_IOMMU_SYNC	BIT(31)

/*
 * struct kgsl_mmu - per-device MMU state shared by all backends.
 */
struct kgsl_mmu {
	unsigned int refcnt;
	uint32_t flags;			/* KGSL_MMU_FLAGS_* */
	struct kgsl_device *device;	/* owning device */
	unsigned int config;		/* value for MH_MMU_CONFIG -- TODO confirm */
	struct kgsl_memdesc setstate_memory;	/* scratch memory used for setstate operations */
	/* current page table object being used by device mmu */
	struct kgsl_pagetable *defaultpagetable;
	/* pagetable object used for priv bank of IOMMU */
	struct kgsl_pagetable *priv_bank_table;
	struct kgsl_pagetable *hwpagetable;	/* pagetable currently programmed in hardware */
	const struct kgsl_mmu_ops *mmu_ops;	/* backend ops; may be NULL for MMU_TYPE_NONE */
	void *priv;			/* backend-private data */
	int fault;			/* nonzero when a pagefault is pending */
};
194
#include "kgsl_gpummu.h"

/* Backend operation tables implemented in kgsl_iommu.c */
extern struct kgsl_mmu_ops iommu_ops;
extern struct kgsl_mmu_pt_ops iommu_pt_ops;

/* Pagetable lookup/creation by name; paired with putpagetable */
struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name);
void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable);
void kgsl_mh_start(struct kgsl_device *device);
void kgsl_mh_intrcallback(struct kgsl_device *device);
int kgsl_mmu_init(struct kgsl_device *device);
int kgsl_mmu_start(struct kgsl_device *device);
int kgsl_mmu_close(struct kgsl_device *device);
/* Map/unmap a memory descriptor into/out of a pagetable */
int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
		 struct kgsl_memdesc *memdesc,
		 unsigned int protflags);
int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
			struct kgsl_memdesc *memdesc, unsigned int protflags);
int kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
		   struct kgsl_memdesc *memdesc);
unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr);
void kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
		   uint32_t flags);
int kgsl_mmu_get_ptname_from_ptbase(struct kgsl_mmu *mmu,
				    unsigned int pt_base);
unsigned int kgsl_mmu_log_fault_addr(struct kgsl_mmu *mmu,
				     unsigned int pt_base, unsigned int addr);
int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt,
			  enum kgsl_deviceid id);
void kgsl_mmu_ptpool_destroy(void *ptpool);
void *kgsl_mmu_ptpool_init(int entries);
int kgsl_mmu_enabled(void);
void kgsl_mmu_set_mmutype(char *mmutype);
enum kgsl_mmutype kgsl_mmu_get_mmutype(void);
int kgsl_mmu_gpuaddr_in_range(unsigned int gpuaddr);
Shubhraprakash Das79447952012-04-26 18:12:23 -0600229
230/*
231 * Static inline functions of MMU that simply call the SMMU specific
232 * function using a function pointer. These functions can be thought
233 * of as wrappers around the actual function
234 */
235
236static inline unsigned int kgsl_mmu_get_current_ptbase(struct kgsl_mmu *mmu)
237{
238 if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_current_ptbase)
239 return mmu->mmu_ops->mmu_get_current_ptbase(mmu);
240 else
241 return 0;
242}
243
244static inline void kgsl_mmu_setstate(struct kgsl_mmu *mmu,
Shubhraprakash Dasb2abc452012-06-08 16:33:03 -0600245 struct kgsl_pagetable *pagetable,
246 unsigned int context_id)
Shubhraprakash Das79447952012-04-26 18:12:23 -0600247{
248 if (mmu->mmu_ops && mmu->mmu_ops->mmu_setstate)
Shubhraprakash Dasb2abc452012-06-08 16:33:03 -0600249 mmu->mmu_ops->mmu_setstate(mmu, pagetable, context_id);
Shubhraprakash Das79447952012-04-26 18:12:23 -0600250}
251
252static inline void kgsl_mmu_device_setstate(struct kgsl_mmu *mmu,
253 uint32_t flags)
254{
255 if (mmu->mmu_ops && mmu->mmu_ops->mmu_device_setstate)
256 mmu->mmu_ops->mmu_device_setstate(mmu, flags);
257}
258
259static inline void kgsl_mmu_stop(struct kgsl_mmu *mmu)
260{
261 if (mmu->mmu_ops && mmu->mmu_ops->mmu_stop)
262 mmu->mmu_ops->mmu_stop(mmu);
263}
264
Shubhraprakash Das3cf33be2012-08-16 22:42:55 -0700265static inline int kgsl_mmu_pt_equal(struct kgsl_mmu *mmu,
266 struct kgsl_pagetable *pt,
Shubhraprakash Das79447952012-04-26 18:12:23 -0600267 unsigned int pt_base)
268{
Shubhraprakash Das3cf33be2012-08-16 22:42:55 -0700269 if (mmu->mmu_ops && mmu->mmu_ops->mmu_pt_equal)
270 return mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base);
271 else
Shubhraprakash Das79447952012-04-26 18:12:23 -0600272 return 1;
Shubhraprakash Das79447952012-04-26 18:12:23 -0600273}
274
Shubhraprakash Das3cf33be2012-08-16 22:42:55 -0700275static inline unsigned int kgsl_mmu_get_pt_base_addr(struct kgsl_mmu *mmu,
276 struct kgsl_pagetable *pt)
Shubhraprakash Das5a610b52012-05-09 17:31:54 -0600277{
Shubhraprakash Das3cf33be2012-08-16 22:42:55 -0700278 if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_pt_base_addr)
279 return mmu->mmu_ops->mmu_get_pt_base_addr(mmu, pt);
Shubhraprakash Dasa5b1db42012-05-09 18:02:34 -0600280 else
281 return 0;
282}
283
Shubhraprakash Daseb6b8542012-05-09 22:42:24 -0600284static inline int kgsl_mmu_get_pt_lsb(struct kgsl_mmu *mmu,
285 unsigned int unit_id,
286 enum kgsl_iommu_context_id ctx_id)
287{
288 if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_pt_lsb)
289 return mmu->mmu_ops->mmu_get_pt_lsb(mmu, unit_id, ctx_id);
290 else
291 return 0;
292}
293
Shubhraprakash Daseb6b8542012-05-09 22:42:24 -0600294static inline int kgsl_mmu_enable_clk(struct kgsl_mmu *mmu,
295 int ctx_id)
296{
297 if (mmu->mmu_ops && mmu->mmu_ops->mmu_enable_clk)
298 return mmu->mmu_ops->mmu_enable_clk(mmu, ctx_id);
299 else
300 return 0;
301}
302
Shubhraprakash Dascb068072012-06-07 17:52:41 -0600303static inline void kgsl_mmu_disable_clk_on_ts(struct kgsl_mmu *mmu,
304 unsigned int ts, bool ts_valid)
Shubhraprakash Daseb6b8542012-05-09 22:42:24 -0600305{
Shubhraprakash Dascb068072012-06-07 17:52:41 -0600306 if (mmu->mmu_ops && mmu->mmu_ops->mmu_disable_clk_on_ts)
307 mmu->mmu_ops->mmu_disable_clk_on_ts(mmu, ts, ts_valid);
Shubhraprakash Daseb6b8542012-05-09 22:42:24 -0600308}
309
Anoop Kumar Yerukala5479c9c2012-07-08 14:53:06 +0530310static inline unsigned int kgsl_mmu_get_int_mask(void)
311{
312 /* Dont enable gpummu interrupts, if iommu is enabled */
313 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_get_mmutype())
314 return KGSL_MMU_INT_MASK;
315 else
316 return (MH_INTERRUPT_MASK__AXI_READ_ERROR |
317 MH_INTERRUPT_MASK__AXI_WRITE_ERROR);
318}
319
Shubhraprakash Das3cf33be2012-08-16 22:42:55 -0700320static inline unsigned int kgsl_mmu_get_reg_gpuaddr(struct kgsl_mmu *mmu,
321 int iommu_unit_num,
322 int ctx_id, int reg)
323{
324 if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_reg_gpuaddr)
325 return mmu->mmu_ops->mmu_get_reg_gpuaddr(mmu, iommu_unit_num,
326 ctx_id, reg);
327 else
328 return 0;
329}
330
331static inline int kgsl_mmu_get_num_iommu_units(struct kgsl_mmu *mmu)
332{
333 if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_num_iommu_units)
334 return mmu->mmu_ops->mmu_get_num_iommu_units(mmu);
335 else
336 return 0;
337}
338
Tarun Karra9c070822012-11-27 16:43:51 -0700339static inline int kgsl_mmu_sync_lock(struct kgsl_mmu *mmu,
340 unsigned int *cmds)
341{
342 if ((mmu->flags & KGSL_MMU_FLAGS_IOMMU_SYNC) &&
343 mmu->mmu_ops && mmu->mmu_ops->mmu_sync_lock)
344 return mmu->mmu_ops->mmu_sync_lock(mmu, cmds);
345 else
346 return 0;
347}
348
349static inline int kgsl_mmu_sync_unlock(struct kgsl_mmu *mmu,
350 unsigned int *cmds)
351{
352 if ((mmu->flags & KGSL_MMU_FLAGS_IOMMU_SYNC) &&
353 mmu->mmu_ops && mmu->mmu_ops->mmu_sync_unlock)
354 return mmu->mmu_ops->mmu_sync_unlock(mmu, cmds);
355 else
356 return 0;
357}
358
/*
 * kgsl_mmu_is_perprocess() - Runtime check for per-process
 * pagetables.
 *
 * Returns non-zero if per-process pagetables are enabled,
 * 0 if not.
 */
#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
static inline int kgsl_mmu_is_perprocess(void)
{
	/* We presently do not support per-process for IOMMU-v2 */
	if (kgsl_mmu_get_mmutype() != KGSL_MMU_TYPE_IOMMU)
		return 1;

	return msm_soc_version_supports_iommu_v1() != 0;
}
#else
static inline int kgsl_mmu_is_perprocess(void)
{
	return 0;
}
#endif
380
/*
 * kgsl_mmu_get_base_addr() - Get gpu virtual address base.
 *
 * Returns the start address of the gpu
 * virtual address space.
 */
static inline unsigned int kgsl_mmu_get_base_addr(void)
{
	return KGSL_PAGETABLE_BASE;
}
391
392/*
393 * kgsl_mmu_get_ptsize() - Get gpu pagetable size
394 *
395 * Returns the usable size of the gpu address space.
396 */
397static inline unsigned int kgsl_mmu_get_ptsize(void)
398{
399 /*
400 * For IOMMU, we could do up to 4G virtual range if we wanted to, but
401 * it makes more sense to return a smaller range and leave the rest of
402 * the virtual range for future improvements
403 */
404 enum kgsl_mmutype mmu_type = kgsl_mmu_get_mmutype();
405
406 if (KGSL_MMU_TYPE_GPU == mmu_type)
407 return CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
408 else if (KGSL_MMU_TYPE_IOMMU == mmu_type)
409 return SZ_2G;
410 return 0;
411}
412
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700413#endif /* __KGSL_MMU_H */