/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
| 2 | * |
| 3 | * This program is free software; you can redistribute it and/or modify |
| 4 | * it under the terms of the GNU General Public License version 2 and |
| 5 | * only version 2 as published by the Free Software Foundation. |
| 6 | * |
| 7 | * This program is distributed in the hope that it will be useful, |
| 8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 10 | * GNU General Public License for more details. |
| 11 | * |
| 12 | */ |
| 13 | #ifndef __KGSL_MMU_H |
| 14 | #define __KGSL_MMU_H |
| 15 | |
/* Identifier for the global page table */
/* Per process page tables will probably pass in the thread group
   as an identifier */

#define KGSL_MMU_GLOBAL_PT 0

/* Number of PTEs grouped into one entry of the TLB-flush filter bitmap
 * (see the GSL_TLBFLUSH_FILTER_* macros below). */
#define GSL_PT_SUPER_PTE 8

/* Software PTE status bits (WV/RV presumably mean write-valid and
 * read-valid -- TODO confirm against the page table setup code). */
#define GSL_PT_PAGE_WV 0x00000001
#define GSL_PT_PAGE_RV 0x00000002
#define GSL_PT_PAGE_DIRTY 0x00000004
/* MMU registers - the register locations for all cores are the
   same. The method for getting to those locations differs between
   2D and 3D, but the 2D and 3D register functions do that magic
   for us */

/* Core MMU control/status registers */
#define MH_MMU_CONFIG 0x0040
#define MH_MMU_VA_RANGE 0x0041
#define MH_MMU_PT_BASE 0x0042
#define MH_MMU_PAGE_FAULT 0x0043
#define MH_MMU_TRAN_ERROR 0x0044
#define MH_MMU_INVALIDATE 0x0045
#define MH_MMU_MPU_BASE 0x0046
#define MH_MMU_MPU_END 0x0047

/* MH (memory hub) interrupt and debug registers.
 * Note MH_ARBITER_CONFIG (0x0A40) sits numerically before the
 * interrupt registers; the list is grouped by function, not offset. */
#define MH_INTERRUPT_MASK 0x0A42
#define MH_INTERRUPT_STATUS 0x0A43
#define MH_INTERRUPT_CLEAR 0x0A44
#define MH_AXI_ERROR 0x0A45
#define MH_ARBITER_CONFIG 0x0A40
#define MH_DEBUG_CTRL 0x0A4E
#define MH_DEBUG_DATA 0x0A4F
#define MH_AXI_HALT_CONTROL 0x0A50
#define MH_CLNT_INTF_CTRL_CONFIG1 0x0A54
#define MH_CLNT_INTF_CTRL_CONFIG2 0x0A55
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 51 | |
/* MH_MMU_CONFIG bit definitions */

/* Bit positions of the per-client behavior fields inside MH_MMU_CONFIG.
 * The shifts step by 2, so each field appears to be 2 bits wide --
 * TODO confirm against hardware documentation. */
#define MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT 0x00000004
#define MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT 0x00000006
#define MH_MMU_CONFIG__CP_R0_CLNT_BEHAVIOR__SHIFT 0x00000008
#define MH_MMU_CONFIG__CP_R1_CLNT_BEHAVIOR__SHIFT 0x0000000a
#define MH_MMU_CONFIG__CP_R2_CLNT_BEHAVIOR__SHIFT 0x0000000c
#define MH_MMU_CONFIG__CP_R3_CLNT_BEHAVIOR__SHIFT 0x0000000e
#define MH_MMU_CONFIG__CP_R4_CLNT_BEHAVIOR__SHIFT 0x00000010
#define MH_MMU_CONFIG__VGT_R0_CLNT_BEHAVIOR__SHIFT 0x00000012
#define MH_MMU_CONFIG__VGT_R1_CLNT_BEHAVIOR__SHIFT 0x00000014
#define MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT 0x00000016
#define MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT 0x00000018

/* MMU Flags */
/* Software state flags; TLBFLUSH is returned by kgsl_pt_get_flags()
 * when a device must flush its TLB before using a pagetable. */
#define KGSL_MMUFLAGS_TLBFLUSH 0x10000000
#define KGSL_MMUFLAGS_PTUPDATE 0x20000000

/* Individual cause bits for MH_INTERRUPT_MASK/STATUS/CLEAR */
#define MH_INTERRUPT_MASK__AXI_READ_ERROR 0x00000001L
#define MH_INTERRUPT_MASK__AXI_WRITE_ERROR 0x00000002L
#define MH_INTERRUPT_MASK__MMU_PAGE_FAULT 0x00000004L
| 73 | |
#ifdef CONFIG_MSM_KGSL_MMU
/* With the MMU enabled, page faults are a meaningful interrupt source
 * in addition to the AXI bus errors. */
#define KGSL_MMU_INT_MASK \
	(MH_INTERRUPT_MASK__AXI_READ_ERROR | \
	 MH_INTERRUPT_MASK__AXI_WRITE_ERROR | \
	 MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
#else
/* Without the MMU only AXI bus errors are unmasked. */
#define KGSL_MMU_INT_MASK \
	(MH_INTERRUPT_MASK__AXI_READ_ERROR | \
	 MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
#endif
| 84 | |
/* Macros to manage TLB flushing.
 *
 * The flush filter is a bitmap with one bit per "superpte" group; a set
 * bit means that range was modified since the last flush.  All of these
 * macros expect a variable named `pagetable` (struct kgsl_pagetable *)
 * to be in scope at the expansion site.
 *
 * Every use of the `superpte` parameter is parenthesized so that
 * expression arguments (e.g. `base + i`) expand correctly; the previous
 * form `superpte / N` would mis-bind as `base + (i / N)`.
 *
 * NOTE(review): the (unsigned int) cast of the base pointer assumes a
 * 32-bit kernel and would truncate pointers on a 64-bit build.
 */
#define GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS (sizeof(unsigned char) * 8)
#define GSL_TLBFLUSH_FILTER_GET(superpte) \
	(*((unsigned char *) \
	(((unsigned int)pagetable->tlbflushfilter.base) \
	+ ((superpte) / GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS))))
#define GSL_TLBFLUSH_FILTER_SETDIRTY(superpte) \
	(GSL_TLBFLUSH_FILTER_GET((superpte)) |= 1 << \
	((superpte) % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS))
#define GSL_TLBFLUSH_FILTER_ISDIRTY(superpte) \
	(GSL_TLBFLUSH_FILTER_GET((superpte)) & \
	(1 << ((superpte) % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS)))
#define GSL_TLBFLUSH_FILTER_RESET() memset(pagetable->tlbflushfilter.base,\
	0, pagetable->tlbflushfilter.size)
| 99 | |
| 100 | |
struct kgsl_device;
/* Forward declaration: struct kgsl_ptpool is defined elsewhere but is
 * named in the kgsl_ptpool_init()/kgsl_ptpool_destroy() prototypes
 * below; declaring it here avoids "struct declared inside parameter
 * list" warnings and makes those prototypes refer to one common type. */
struct kgsl_ptpool;

/* Backing store for the TLB flush filter bitmap (one bit per superpte;
 * see the GSL_TLBFLUSH_FILTER_* macros above). */
struct kgsl_tlbflushfilter {
	unsigned int *base;	/* bitmap storage */
	unsigned int size;	/* bitmap size in bytes (memset'd on reset) */
};
| 107 | |
/* One GPU page table plus the software bookkeeping for it. */
struct kgsl_pagetable {
	spinlock_t lock;	/* protects tlb_flags (see kgsl_pt_get_flags) */
	struct kref refcount;	/* dropped via kgsl_mmu_putpagetable() */
	struct kgsl_memdesc base;
	uint32_t va_base;	/* start of the GPU virtual range */
	unsigned int va_range;	/* length of the GPU virtual range */
	unsigned int last_superpte;
	unsigned int max_entries;
	struct gen_pool *pool;	/* presumably allocates GPU VAs -- verify */
	struct list_head list;
	unsigned int name;	/* pagetable id, e.g. KGSL_MMU_GLOBAL_PT */
	/* Maintain filter to manage tlb flushing */
	struct kgsl_tlbflushfilter tlbflushfilter;
	unsigned int tlb_flags;	/* per-device "TLB flush needed" bits (1 << id) */
	struct kobject *kobj;

	/* usage statistics */
	struct {
		unsigned int entries;
		unsigned int mapped;
		unsigned int max_mapped;
		unsigned int max_entries;
	} stats;
};
| 131 | |
/* Per-device MMU state. */
struct kgsl_mmu {
	unsigned int refcnt;
	uint32_t flags;
	struct kgsl_device *device;	/* owning device */
	unsigned int config;		/* presumably the MH_MMU_CONFIG value -- verify */
	struct kgsl_memdesc dummyspace;
	/* current page table object being used by device mmu */
	struct kgsl_pagetable *defaultpagetable;
	struct kgsl_pagetable *hwpagetable;
};
| 142 | |
/* One chunk of backing memory in a page table pool. */
struct kgsl_ptpool_chunk {
	size_t size;
	unsigned int count;
	int dynamic;		/* nonzero if allocated at runtime -- TODO confirm */

	void *data;		/* CPU address of the chunk */
	unsigned int phys;	/* physical address of the chunk */

	unsigned long *bitmap;	/* entry allocation bitmap */
	struct list_head list;	/* linkage in the owning pool */
};
| 154 | |
/* Get a reference to the pagetable identified by 'name'; release it
 * with kgsl_mmu_putpagetable(). */
struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name);

/* MH (memory hub) bring-up and interrupt service; shared by all cores. */
void kgsl_mh_start(struct kgsl_device *device);
void kgsl_mh_intrcallback(struct kgsl_device *device);
| 159 | |
#ifdef CONFIG_MSM_KGSL_MMU

/* Real MMU implementation, compiled in when CONFIG_MSM_KGSL_MMU is set.
 * kgsl_mmu_map/unmap establish and tear down GPU mappings for a
 * memdesc; kgsl_mmu_map_global makes a mapping shared across all
 * pagetables. */
int kgsl_mmu_init(struct kgsl_device *device);
int kgsl_mmu_start(struct kgsl_device *device);
int kgsl_mmu_stop(struct kgsl_device *device);
int kgsl_mmu_close(struct kgsl_device *device);
void kgsl_mmu_setstate(struct kgsl_device *device,
			struct kgsl_pagetable *pagetable);
int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
		 struct kgsl_memdesc *memdesc,
		 unsigned int protflags);
int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
			struct kgsl_memdesc *memdesc, unsigned int protflags);
int kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
		   struct kgsl_memdesc *memdesc);
void kgsl_ptpool_destroy(struct kgsl_ptpool *pool);
int kgsl_ptpool_init(struct kgsl_ptpool *pool, int ptsize, int entries);
void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable);
unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr);
void kgsl_setstate(struct kgsl_device *device, uint32_t flags);
void kgsl_default_setstate(struct kgsl_device *device, uint32_t flags);
int kgsl_get_ptname_from_ptbase(unsigned int pt_base);

/* Compile-time answer: this build has the MMU. */
static inline int kgsl_mmu_enabled(void)
{
	return 1;
}
| 187 | |
#else

/* Stub implementations used when CONFIG_MSM_KGSL_MMU is not set.
 * With no MMU the GPU uses physical addresses directly: map/unmap are
 * identity operations (gpuaddr = physaddr) and the setup/teardown
 * entry points succeed without doing anything. */

static inline int kgsl_mmu_enabled(void)
{
	return 0;
}

static inline int kgsl_mmu_init(struct kgsl_device *device)
{
	return 0;
}

static inline int kgsl_mmu_start(struct kgsl_device *device)
{
	return 0;
}

static inline int kgsl_mmu_stop(struct kgsl_device *device)
{
	return 0;
}

static inline int kgsl_mmu_close(struct kgsl_device *device)
{
	return 0;
}

static inline void kgsl_mmu_setstate(struct kgsl_device *device,
			struct kgsl_pagetable *pagetable) { }

/* No translation: the GPU address is simply the physical address. */
static inline int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
			       struct kgsl_memdesc *memdesc,
			       unsigned int protflags)
{
	memdesc->gpuaddr = memdesc->physaddr;
	return 0;
}

static inline int kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
				 struct kgsl_memdesc *memdesc)
{
	return 0;
}

static inline int kgsl_ptpool_init(struct kgsl_ptpool *pool, int ptsize,
				   int entries)
{
	return 0;
}

/* Same identity mapping as kgsl_mmu_map. */
static inline int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
				      struct kgsl_memdesc *memdesc, unsigned int protflags)
{
	memdesc->gpuaddr = memdesc->physaddr;
	return 0;
}

static inline void kgsl_ptpool_destroy(struct kgsl_ptpool *pool) { }

static inline void kgsl_mh_intrcallback(struct kgsl_device *device) { }

static inline void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable) { }

static inline unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr)
{
	return 0;
}

static inline void kgsl_setstate(struct kgsl_device *device, uint32_t flags)
{ }

static inline void kgsl_default_setstate(struct kgsl_device *device,
					 uint32_t flags) { }
Sushmita Susheelendra | 354d971 | 2011-07-28 17:16:49 -0600 | [diff] [blame] | 261 | |
/* Stub: with no MMU there is only one (global) pagetable, so every
 * pt_base resolves to id 0 (KGSL_MMU_GLOBAL_PT).
 * Fixes the original empty body `{ }`: falling off the end of a
 * non-void function makes any use of the result undefined behavior. */
static inline int kgsl_get_ptname_from_ptbase(unsigned int pt_base)
{
	return 0;
}
| 263 | |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 264 | #endif |
| 265 | |
| 266 | static inline unsigned int kgsl_pt_get_flags(struct kgsl_pagetable *pt, |
| 267 | enum kgsl_deviceid id) |
| 268 | { |
| 269 | unsigned int result = 0; |
| 270 | |
| 271 | if (pt == NULL) |
| 272 | return 0; |
| 273 | |
| 274 | spin_lock(&pt->lock); |
| 275 | if (pt->tlb_flags && (1<<id)) { |
| 276 | result = KGSL_MMUFLAGS_TLBFLUSH; |
| 277 | pt->tlb_flags &= ~(1<<id); |
| 278 | } |
| 279 | spin_unlock(&pt->lock); |
| 280 | return result; |
| 281 | } |
| 282 | |
| 283 | #endif /* __KGSL_MMU_H */ |