blob: 9d1bffa945454b1d59e73c61778c42f8a8ab717a [file] [log] [blame]
Duy Truonge833aca2013-02-12 13:35:08 -08001/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#ifndef __KGSL_MMU_H
14#define __KGSL_MMU_H
15
Jeremy Gebben2aba0f32013-05-28 16:54:00 -060016#include <mach/iommu.h>
17
Shubhraprakash Das84fdb112012-04-04 12:49:31 -060018/*
Jeremy Gebben2aba0f32013-05-28 16:54:00 -060019 * These defines control the address range for allocations that
20 * are mapped into all pagetables.
Shubhraprakash Das84fdb112012-04-04 12:49:31 -060021 */
Jeremy Gebbenfec05c22013-05-28 16:59:29 -060022#define KGSL_IOMMU_GLOBAL_MEM_BASE 0xf8000000
Shubhraprakash Das84fdb112012-04-04 12:49:31 -060023#define KGSL_IOMMU_GLOBAL_MEM_SIZE SZ_4M
Shubhraprakash Das84fdb112012-04-04 12:49:31 -060024
Jeremy Gebbenfec05c22013-05-28 16:59:29 -060025#define KGSL_MMU_ALIGN_MASK (~((1 << PAGE_SHIFT) - 1))
Shubhraprakash Das767fdda2011-08-15 15:49:45 -060026
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070027/* Identifier for the global page table */
28/* Per process page tables will probably pass in the thread group
29 as an identifier */
30
31#define KGSL_MMU_GLOBAL_PT 0
Shubhraprakash Das19ca4a62012-05-18 12:11:20 -060032#define KGSL_MMU_PRIV_BANK_TABLE_NAME 0xFFFFFFFF
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070033
Shubhraprakash Das767fdda2011-08-15 15:49:45 -060034struct kgsl_device;
35
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070036#define GSL_PT_SUPER_PTE 8
37#define GSL_PT_PAGE_WV 0x00000001
38#define GSL_PT_PAGE_RV 0x00000002
39#define GSL_PT_PAGE_DIRTY 0x00000004
40
41/* MMU registers - the register locations for all cores are the
42 same. The method for getting to those locations differs between
43 2D and 3D, but the 2D and 3D register functions do that magic
44 for us */
45
46#define MH_MMU_CONFIG 0x0040
47#define MH_MMU_VA_RANGE 0x0041
48#define MH_MMU_PT_BASE 0x0042
49#define MH_MMU_PAGE_FAULT 0x0043
50#define MH_MMU_TRAN_ERROR 0x0044
51#define MH_MMU_INVALIDATE 0x0045
52#define MH_MMU_MPU_BASE 0x0046
53#define MH_MMU_MPU_END 0x0047
54
55#define MH_INTERRUPT_MASK 0x0A42
56#define MH_INTERRUPT_STATUS 0x0A43
57#define MH_INTERRUPT_CLEAR 0x0A44
58#define MH_AXI_ERROR 0x0A45
Jeremy Gebben4e8aada2011-07-12 10:07:47 -060059#define MH_ARBITER_CONFIG 0x0A40
60#define MH_DEBUG_CTRL 0x0A4E
61#define MH_DEBUG_DATA 0x0A4F
62#define MH_AXI_HALT_CONTROL 0x0A50
63#define MH_CLNT_INTF_CTRL_CONFIG1 0x0A54
64#define MH_CLNT_INTF_CTRL_CONFIG2 0x0A55
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070065
66/* MH_MMU_CONFIG bit definitions */
67
68#define MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT 0x00000004
69#define MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT 0x00000006
70#define MH_MMU_CONFIG__CP_R0_CLNT_BEHAVIOR__SHIFT 0x00000008
71#define MH_MMU_CONFIG__CP_R1_CLNT_BEHAVIOR__SHIFT 0x0000000a
72#define MH_MMU_CONFIG__CP_R2_CLNT_BEHAVIOR__SHIFT 0x0000000c
73#define MH_MMU_CONFIG__CP_R3_CLNT_BEHAVIOR__SHIFT 0x0000000e
74#define MH_MMU_CONFIG__CP_R4_CLNT_BEHAVIOR__SHIFT 0x00000010
75#define MH_MMU_CONFIG__VGT_R0_CLNT_BEHAVIOR__SHIFT 0x00000012
76#define MH_MMU_CONFIG__VGT_R1_CLNT_BEHAVIOR__SHIFT 0x00000014
77#define MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT 0x00000016
78#define MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT 0x00000018
79
80/* MMU Flags */
81#define KGSL_MMUFLAGS_TLBFLUSH 0x10000000
82#define KGSL_MMUFLAGS_PTUPDATE 0x20000000
83
84#define MH_INTERRUPT_MASK__AXI_READ_ERROR 0x00000001L
85#define MH_INTERRUPT_MASK__AXI_WRITE_ERROR 0x00000002L
86#define MH_INTERRUPT_MASK__MMU_PAGE_FAULT 0x00000004L
87
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070088#define KGSL_MMU_INT_MASK \
89 (MH_INTERRUPT_MASK__AXI_READ_ERROR | \
90 MH_INTERRUPT_MASK__AXI_WRITE_ERROR | \
91 MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070092
/* Which MMU implementation backs the GPU address space. */
enum kgsl_mmutype {
	KGSL_MMU_TYPE_GPU = 0,	/* the GPU's own built-in MMU (gpummu) */
	KGSL_MMU_TYPE_IOMMU,	/* system IOMMU */
	KGSL_MMU_TYPE_NONE	/* no MMU; GPU uses physical addresses */
};
98
/*
 * A GPU pagetable: virtual-address allocation pools, usage statistics
 * and the backend-specific operations/state used to map memory into it.
 */
struct kgsl_pagetable {
	spinlock_t lock;
	struct kref refcount;		/* lifetime; released via kgsl_mmu_putpagetable() */
	unsigned int max_entries;
	struct gen_pool *pool;		/* VA allocator for regular mappings */
	struct gen_pool *kgsl_pool;	/* separate VA allocator (global/kernel mappings) */
	struct list_head list;		/* node in the driver-wide pagetable list */
	unsigned int name;		/* identifier, e.g. KGSL_MMU_GLOBAL_PT */
	struct kobject *kobj;		/* sysfs object exposing the stats below */

	struct {
		unsigned int entries;
		unsigned int mapped;
		unsigned int max_mapped;	/* high-water mark of mapped */
		unsigned int max_entries;	/* high-water mark of entries */
	} stats;
	const struct kgsl_mmu_pt_ops *pt_ops;	/* backend map/unmap operations */
	unsigned int tlb_flags;
	unsigned int fault_addr;	/* see kgsl_mmu_log_fault_addr() */
	void *priv;			/* backend-private pagetable data */
};
120
Shubhraprakash Das1c528262012-04-26 17:38:13 -0600121struct kgsl_mmu;
122
/*
 * Backend operation table abstracting the MMU implementation (gpummu
 * vs iommu). The kgsl_mmu core calls through these pointers via the
 * static inline wrappers below; any pointer may be NULL when the
 * backend does not implement that hook (the wrappers check for this).
 */
struct kgsl_mmu_ops {
	int (*mmu_init) (struct kgsl_mmu *mmu);
	int (*mmu_close) (struct kgsl_mmu *mmu);
	int (*mmu_start) (struct kgsl_mmu *mmu);
	void (*mmu_stop) (struct kgsl_mmu *mmu);
	void (*mmu_setstate) (struct kgsl_mmu *mmu,
			      struct kgsl_pagetable *pagetable,
			      unsigned int context_id);
	void (*mmu_device_setstate) (struct kgsl_mmu *mmu,
				     uint32_t flags);
	void (*mmu_pagefault) (struct kgsl_mmu *mmu);
	unsigned int (*mmu_get_current_ptbase)
			(struct kgsl_mmu *mmu);
	void (*mmu_disable_clk_on_ts)
			(struct kgsl_mmu *mmu, uint32_t ts, bool ts_valid);
	int (*mmu_enable_clk)
			(struct kgsl_mmu *mmu, int ctx_id);
	int (*mmu_get_pt_lsb)(struct kgsl_mmu *mmu,
			      unsigned int unit_id,
			      enum kgsl_iommu_context_id ctx_id);
	unsigned int (*mmu_get_reg_gpuaddr)(struct kgsl_mmu *mmu,
			int iommu_unit_num, int ctx_id, int reg);
	int (*mmu_get_num_iommu_units)(struct kgsl_mmu *mmu);
	int (*mmu_pt_equal) (struct kgsl_mmu *mmu,
			     struct kgsl_pagetable *pt,
			     unsigned int pt_base);
	unsigned int (*mmu_get_pt_base_addr)
			(struct kgsl_mmu *mmu,
			 struct kgsl_pagetable *pt);
	unsigned int (*mmu_sync_lock)
			(struct kgsl_mmu *mmu,
			 unsigned int *cmds);
	unsigned int (*mmu_sync_unlock)
			(struct kgsl_mmu *mmu,
			 unsigned int *cmds);
	int (*mmu_setup_pt) (struct kgsl_mmu *mmu,
			     struct kgsl_pagetable *pt);
	void (*mmu_cleanup_pt) (struct kgsl_mmu *mmu,
				struct kgsl_pagetable *pt);
};
163
/*
 * Per-pagetable backend operations. mmu_pt is the opaque object
 * handed out by mmu_create_pagetable (presumably stored in
 * kgsl_pagetable.priv — confirm against the backend implementations).
 */
struct kgsl_mmu_pt_ops {
	int (*mmu_map) (void *mmu_pt,
			struct kgsl_memdesc *memdesc,
			unsigned int protflags,
			unsigned int *tlb_flags);
	int (*mmu_unmap) (void *mmu_pt,
			struct kgsl_memdesc *memdesc,
			unsigned int *tlb_flags);
	void *(*mmu_create_pagetable) (void);
	void (*mmu_destroy_pagetable) (void *pt);
};
175
Tarun Karra9c070822012-11-27 16:43:51 -0700176#define KGSL_MMU_FLAGS_IOMMU_SYNC BIT(31)
177
/* Per-device MMU state; one instance embedded in each kgsl_device. */
struct kgsl_mmu {
	unsigned int refcnt;
	uint32_t flags;			/* e.g. KGSL_MMU_FLAGS_IOMMU_SYNC */
	struct kgsl_device *device;	/* back-pointer to the owning device */
	unsigned int config;
	struct kgsl_memdesc setstate_memory;
	/* current page table object being used by device mmu */
	struct kgsl_pagetable *defaultpagetable;
	/* pagetable object used for priv bank of IOMMU */
	struct kgsl_pagetable *priv_bank_table;
	struct kgsl_pagetable *hwpagetable;	/* pagetable programmed into the hardware */
	const struct kgsl_mmu_ops *mmu_ops;	/* backend operation table */
	void *priv;			/* backend-private data */
	int fault;			/* NOTE(review): set on pagefault — confirm in kgsl_mmu.c */
};
193
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600194#include "kgsl_gpummu.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700195
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600196extern struct kgsl_mmu_ops iommu_ops;
197extern struct kgsl_mmu_pt_ops iommu_pt_ops;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700198
199struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600200void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable);
Jeremy Gebben4e8aada2011-07-12 10:07:47 -0600201void kgsl_mh_start(struct kgsl_device *device);
202void kgsl_mh_intrcallback(struct kgsl_device *device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700203int kgsl_mmu_init(struct kgsl_device *device);
204int kgsl_mmu_start(struct kgsl_device *device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700205int kgsl_mmu_close(struct kgsl_device *device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700206int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
207 struct kgsl_memdesc *memdesc,
208 unsigned int protflags);
209int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
210 struct kgsl_memdesc *memdesc, unsigned int protflags);
211int kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
212 struct kgsl_memdesc *memdesc);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700213unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr);
Shubhraprakash Dasb2abc452012-06-08 16:33:03 -0600214void kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
215 uint32_t flags);
Shubhraprakash Das3cf33be2012-08-16 22:42:55 -0700216int kgsl_mmu_get_ptname_from_ptbase(struct kgsl_mmu *mmu,
217 unsigned int pt_base);
Tarun Karrab8107322013-02-07 13:46:02 -0800218unsigned int kgsl_mmu_log_fault_addr(struct kgsl_mmu *mmu,
219 unsigned int pt_base, unsigned int addr);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600220int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt,
221 enum kgsl_deviceid id);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600222void kgsl_mmu_ptpool_destroy(void *ptpool);
Jordan Crouse6d76c4d2012-03-26 09:50:43 -0600223void *kgsl_mmu_ptpool_init(int entries);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600224int kgsl_mmu_enabled(void);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600225void kgsl_mmu_set_mmutype(char *mmutype);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600226enum kgsl_mmutype kgsl_mmu_get_mmutype(void);
Shubhraprakash Dase7652cf2012-08-11 17:15:19 -0700227int kgsl_mmu_gpuaddr_in_range(unsigned int gpuaddr);
Shubhraprakash Das79447952012-04-26 18:12:23 -0600228
229/*
230 * Static inline functions of MMU that simply call the SMMU specific
231 * function using a function pointer. These functions can be thought
232 * of as wrappers around the actual function
233 */
234
235static inline unsigned int kgsl_mmu_get_current_ptbase(struct kgsl_mmu *mmu)
236{
237 if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_current_ptbase)
238 return mmu->mmu_ops->mmu_get_current_ptbase(mmu);
239 else
240 return 0;
241}
242
243static inline void kgsl_mmu_setstate(struct kgsl_mmu *mmu,
Shubhraprakash Dasb2abc452012-06-08 16:33:03 -0600244 struct kgsl_pagetable *pagetable,
245 unsigned int context_id)
Shubhraprakash Das79447952012-04-26 18:12:23 -0600246{
247 if (mmu->mmu_ops && mmu->mmu_ops->mmu_setstate)
Shubhraprakash Dasb2abc452012-06-08 16:33:03 -0600248 mmu->mmu_ops->mmu_setstate(mmu, pagetable, context_id);
Shubhraprakash Das79447952012-04-26 18:12:23 -0600249}
250
251static inline void kgsl_mmu_device_setstate(struct kgsl_mmu *mmu,
252 uint32_t flags)
253{
254 if (mmu->mmu_ops && mmu->mmu_ops->mmu_device_setstate)
255 mmu->mmu_ops->mmu_device_setstate(mmu, flags);
256}
257
258static inline void kgsl_mmu_stop(struct kgsl_mmu *mmu)
259{
260 if (mmu->mmu_ops && mmu->mmu_ops->mmu_stop)
261 mmu->mmu_ops->mmu_stop(mmu);
262}
263
Shubhraprakash Das3cf33be2012-08-16 22:42:55 -0700264static inline int kgsl_mmu_pt_equal(struct kgsl_mmu *mmu,
265 struct kgsl_pagetable *pt,
Shubhraprakash Das79447952012-04-26 18:12:23 -0600266 unsigned int pt_base)
267{
Shubhraprakash Das3cf33be2012-08-16 22:42:55 -0700268 if (mmu->mmu_ops && mmu->mmu_ops->mmu_pt_equal)
269 return mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base);
270 else
Shubhraprakash Das79447952012-04-26 18:12:23 -0600271 return 1;
Shubhraprakash Das79447952012-04-26 18:12:23 -0600272}
273
Shubhraprakash Das3cf33be2012-08-16 22:42:55 -0700274static inline unsigned int kgsl_mmu_get_pt_base_addr(struct kgsl_mmu *mmu,
275 struct kgsl_pagetable *pt)
Shubhraprakash Das5a610b52012-05-09 17:31:54 -0600276{
Shubhraprakash Das3cf33be2012-08-16 22:42:55 -0700277 if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_pt_base_addr)
278 return mmu->mmu_ops->mmu_get_pt_base_addr(mmu, pt);
Shubhraprakash Dasa5b1db42012-05-09 18:02:34 -0600279 else
280 return 0;
281}
282
Shubhraprakash Daseb6b8542012-05-09 22:42:24 -0600283static inline int kgsl_mmu_get_pt_lsb(struct kgsl_mmu *mmu,
284 unsigned int unit_id,
285 enum kgsl_iommu_context_id ctx_id)
286{
287 if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_pt_lsb)
288 return mmu->mmu_ops->mmu_get_pt_lsb(mmu, unit_id, ctx_id);
289 else
290 return 0;
291}
292
Shubhraprakash Daseb6b8542012-05-09 22:42:24 -0600293static inline int kgsl_mmu_enable_clk(struct kgsl_mmu *mmu,
294 int ctx_id)
295{
296 if (mmu->mmu_ops && mmu->mmu_ops->mmu_enable_clk)
297 return mmu->mmu_ops->mmu_enable_clk(mmu, ctx_id);
298 else
299 return 0;
300}
301
Shubhraprakash Dascb068072012-06-07 17:52:41 -0600302static inline void kgsl_mmu_disable_clk_on_ts(struct kgsl_mmu *mmu,
303 unsigned int ts, bool ts_valid)
Shubhraprakash Daseb6b8542012-05-09 22:42:24 -0600304{
Shubhraprakash Dascb068072012-06-07 17:52:41 -0600305 if (mmu->mmu_ops && mmu->mmu_ops->mmu_disable_clk_on_ts)
306 mmu->mmu_ops->mmu_disable_clk_on_ts(mmu, ts, ts_valid);
Shubhraprakash Daseb6b8542012-05-09 22:42:24 -0600307}
308
Anoop Kumar Yerukala5479c9c2012-07-08 14:53:06 +0530309static inline unsigned int kgsl_mmu_get_int_mask(void)
310{
311 /* Dont enable gpummu interrupts, if iommu is enabled */
312 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_get_mmutype())
313 return KGSL_MMU_INT_MASK;
314 else
315 return (MH_INTERRUPT_MASK__AXI_READ_ERROR |
316 MH_INTERRUPT_MASK__AXI_WRITE_ERROR);
317}
318
Shubhraprakash Das3cf33be2012-08-16 22:42:55 -0700319static inline unsigned int kgsl_mmu_get_reg_gpuaddr(struct kgsl_mmu *mmu,
320 int iommu_unit_num,
321 int ctx_id, int reg)
322{
323 if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_reg_gpuaddr)
324 return mmu->mmu_ops->mmu_get_reg_gpuaddr(mmu, iommu_unit_num,
325 ctx_id, reg);
326 else
327 return 0;
328}
329
330static inline int kgsl_mmu_get_num_iommu_units(struct kgsl_mmu *mmu)
331{
332 if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_num_iommu_units)
333 return mmu->mmu_ops->mmu_get_num_iommu_units(mmu);
334 else
335 return 0;
336}
337
Tarun Karra9c070822012-11-27 16:43:51 -0700338static inline int kgsl_mmu_sync_lock(struct kgsl_mmu *mmu,
339 unsigned int *cmds)
340{
341 if ((mmu->flags & KGSL_MMU_FLAGS_IOMMU_SYNC) &&
342 mmu->mmu_ops && mmu->mmu_ops->mmu_sync_lock)
343 return mmu->mmu_ops->mmu_sync_lock(mmu, cmds);
344 else
345 return 0;
346}
347
348static inline int kgsl_mmu_sync_unlock(struct kgsl_mmu *mmu,
349 unsigned int *cmds)
350{
351 if ((mmu->flags & KGSL_MMU_FLAGS_IOMMU_SYNC) &&
352 mmu->mmu_ops && mmu->mmu_ops->mmu_sync_unlock)
353 return mmu->mmu_ops->mmu_sync_unlock(mmu, cmds);
354 else
355 return 0;
356}
357
Jeremy Gebben2aba0f32013-05-28 16:54:00 -0600358/*
359 * kgsl_mmu_is_perprocess() - Runtime check for per-process
360 * pagetables.
361 *
362 * Returns non-zero if per-process pagetables are enabled,
363 * 0 if not.
364 */
#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
static inline int kgsl_mmu_is_perprocess(void)
{
	/* Per-process pagetables are not presently supported on IOMMU-v2. */
	if (kgsl_mmu_get_mmutype() != KGSL_MMU_TYPE_IOMMU)
		return 1;

	return msm_soc_version_supports_iommu_v1() ? 1 : 0;
}
#else
static inline int kgsl_mmu_is_perprocess(void)
{
	return 0;
}
#endif
379
380/*
381 * kgsl_mmu_base_addr() - Get gpu virtual address base.
382 *
Jeremy Gebbenfec05c22013-05-28 16:59:29 -0600383 * Returns the start address of the allocatable gpu
384 * virtual address space. Other mappings that mirror
385 * the CPU address space are possible outside this range.
Jeremy Gebben2aba0f32013-05-28 16:54:00 -0600386 */
387static inline unsigned int kgsl_mmu_get_base_addr(void)
388{
Jeremy Gebbenfec05c22013-05-28 16:59:29 -0600389 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_get_mmutype()
390 || !kgsl_mmu_is_perprocess())
391 return KGSL_PAGETABLE_BASE;
392 /*
393 * This is the start of the kernel address
394 * space, so allocations from this range will
395 * never conflict with userpace addresses
396 */
397 return PAGE_OFFSET;
Jeremy Gebben2aba0f32013-05-28 16:54:00 -0600398}
399
400/*
401 * kgsl_mmu_get_ptsize() - Get gpu pagetable size
402 *
Jeremy Gebbenfec05c22013-05-28 16:59:29 -0600403 * Returns the usable size of the gpu allocatable
404 * address space.
Jeremy Gebben2aba0f32013-05-28 16:54:00 -0600405 */
406static inline unsigned int kgsl_mmu_get_ptsize(void)
407{
408 /*
Jeremy Gebbenfec05c22013-05-28 16:59:29 -0600409 * For IOMMU per-process pagetables, the allocatable range
410 * and the kernel global range must both be outside
411 * the userspace address range. There is a 1Mb gap
412 * between these address ranges to make overrun
413 * detection easier.
414 * For the shared pagetable case use 2GB and because
415 * mirroring the CPU address space is not possible and
416 * we're better off with extra room.
Jeremy Gebben2aba0f32013-05-28 16:54:00 -0600417 */
418 enum kgsl_mmutype mmu_type = kgsl_mmu_get_mmutype();
419
420 if (KGSL_MMU_TYPE_GPU == mmu_type)
421 return CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
Jeremy Gebbenfec05c22013-05-28 16:59:29 -0600422 else if (KGSL_MMU_TYPE_IOMMU == mmu_type) {
423 if (kgsl_mmu_is_perprocess())
424 return KGSL_IOMMU_GLOBAL_MEM_BASE
425 - kgsl_mmu_get_base_addr() - SZ_1M;
426 else
427 return SZ_2G;
428 }
Jeremy Gebben2aba0f32013-05-28 16:54:00 -0600429 return 0;
430}
431
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700432#endif /* __KGSL_MMU_H */