/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/memory_alloc.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/highmem.h>
#include <asm/cacheflush.h>

#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_cffdump.h"
#include "kgsl_device.h"

/* An attribute for showing per-process memory statistics */
struct kgsl_mem_entry_attribute {
	struct attribute attr;
	int memtype;
	ssize_t (*show)(struct kgsl_process_private *priv,
			int type, char *buf);
};

#define to_mem_entry_attr(a) \
	container_of(a, struct kgsl_mem_entry_attribute, attr)

#define __MEM_ENTRY_ATTR(_type, _name, _show) \
{ \
	.attr = { .name = __stringify(_name), .mode = 0444 }, \
	.memtype = _type, \
	.show = _show, \
}

/*
 * A structure to hold the attributes for a particular memory type.
 * For each memory type in each process we store the current and maximum
 * memory usage and display the counts in sysfs. This structure and
 * the following macro allow us to simplify the definition for those
 * adding new memory types.
 */

struct mem_entry_stats {
	int memtype;
	struct kgsl_mem_entry_attribute attr;
	struct kgsl_mem_entry_attribute max_attr;
};

#define MEM_ENTRY_STAT(_type, _name) \
{ \
	.memtype = _type, \
	.attr = __MEM_ENTRY_ATTR(_type, _name, mem_entry_show), \
	.max_attr = __MEM_ENTRY_ATTR(_type, _name##_max, \
		mem_entry_max_show), \
}
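
/*
 * Adding a new memory type is then a single line in mem_stats[] below.
 * For example (KGSL_MEM_ENTRY_FOO is hypothetical):
 *
 *	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_FOO, foo),
 *
 * creates both a "foo" and a "foo_max" sysfs file for each process.
 */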

/*
 * One page allocation for a guard region to protect against over-zealous
 * GPU pre-fetch.
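 *
 * The page is a shared singleton: it is appended to the end of an
 * allocation's sglist and flagged with KGSL_MEMDESC_GUARD_PAGE so that
 * the free and kernel-map paths know to skip it.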
 */

static struct page *kgsl_guard_page;

/*
 * Given a kobj, find the process structure attached to it. The caller
 * must hold kgsl_driver.process_mutex across this call and any use of
 * the result.
 */

static struct kgsl_process_private *
_get_priv_from_kobj(struct kobject *kobj)
{
	struct kgsl_process_private *private;
	unsigned long name;

	if (!kobj)
		return NULL;

	if (sscanf(kobj->name, "%lu", &name) != 1)
		return NULL;

	list_for_each_entry(private, &kgsl_driver.process_list, list) {
		if (private->pid == name)
			return private;
	}

	return NULL;
}

/*
 * Show the current amount of memory allocated for the given memtype
 */

static ssize_t
mem_entry_show(struct kgsl_process_private *priv, int type, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", priv->stats[type].cur);
}

/*
 * Show the maximum memory allocated for the given memtype through the life of
 * the process
 */

static ssize_t
mem_entry_max_show(struct kgsl_process_private *priv, int type, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", priv->stats[type].max);
}

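/*
 * Empty on purpose: the kobject is embedded in struct kgsl_process_private,
 * whose lifetime is managed elsewhere, so there is nothing to free here.
 */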
static void mem_entry_sysfs_release(struct kobject *kobj)
{
}

static ssize_t mem_entry_sysfs_show(struct kobject *kobj,
	struct attribute *attr, char *buf)
{
	struct kgsl_mem_entry_attribute *pattr = to_mem_entry_attr(attr);
	struct kgsl_process_private *priv;
	ssize_t ret;

	mutex_lock(&kgsl_driver.process_mutex);
	priv = _get_priv_from_kobj(kobj);

	if (priv && pattr->show)
		ret = pattr->show(priv, pattr->memtype, buf);
	else
		ret = -EIO;

	mutex_unlock(&kgsl_driver.process_mutex);
	return ret;
}

static const struct sysfs_ops mem_entry_sysfs_ops = {
	.show = mem_entry_sysfs_show,
};

static struct kobj_type ktype_mem_entry = {
	.sysfs_ops = &mem_entry_sysfs_ops,
	.default_attrs = NULL,
	.release = mem_entry_sysfs_release
};

static struct mem_entry_stats mem_stats[] = {
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_KERNEL, kernel),
#ifdef CONFIG_ANDROID_PMEM
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_PMEM, pmem),
#endif
#ifdef CONFIG_ASHMEM
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_ASHMEM, ashmem),
#endif
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_USER, user),
#ifdef CONFIG_ION
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_ION, ion),
#endif
};
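
/*
 * Each entry above creates two read-only sysfs files under the per-process
 * kobject (which is named after the pid), e.g. "kernel" and "kernel_max"
 * for the current and peak usage of that type.
 */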

void
kgsl_process_uninit_sysfs(struct kgsl_process_private *private)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mem_stats); i++) {
		sysfs_remove_file(&private->kobj, &mem_stats[i].attr.attr);
		sysfs_remove_file(&private->kobj,
			&mem_stats[i].max_attr.attr);
	}

	kobject_put(&private->kobj);
}

void
kgsl_process_init_sysfs(struct kgsl_process_private *private)
{
	unsigned char name[16];
	int i, ret;

	snprintf(name, sizeof(name), "%d", private->pid);

	if (kobject_init_and_add(&private->kobj, &ktype_mem_entry,
		kgsl_driver.prockobj, name))
		return;

	for (i = 0; i < ARRAY_SIZE(mem_stats); i++) {
		/*
		 * sysfs_create_file() is __must_check, so consume the
		 * return value, but a failure here is not fatal
		 */

		ret = sysfs_create_file(&private->kobj,
			&mem_stats[i].attr.attr);
		ret = sysfs_create_file(&private->kobj,
			&mem_stats[i].max_attr.attr);
	}
}

static ssize_t kgsl_drv_memstat_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	unsigned int val = 0;

	/*
	 * Match the attribute names exactly: a prefix test such as
	 * strncmp(name, "vmalloc", 7) would also match "vmalloc_max",
	 * so the _max files would wrongly report the current value.
	 */
	if (!strcmp(attr->attr.name, "vmalloc"))
		val = kgsl_driver.stats.vmalloc;
	else if (!strcmp(attr->attr.name, "vmalloc_max"))
		val = kgsl_driver.stats.vmalloc_max;
	else if (!strcmp(attr->attr.name, "page_alloc"))
		val = kgsl_driver.stats.page_alloc;
	else if (!strcmp(attr->attr.name, "page_alloc_max"))
		val = kgsl_driver.stats.page_alloc_max;
	else if (!strcmp(attr->attr.name, "coherent"))
		val = kgsl_driver.stats.coherent;
	else if (!strcmp(attr->attr.name, "coherent_max"))
		val = kgsl_driver.stats.coherent_max;
	else if (!strcmp(attr->attr.name, "mapped"))
		val = kgsl_driver.stats.mapped;
	else if (!strcmp(attr->attr.name, "mapped_max"))
		val = kgsl_driver.stats.mapped_max;

	return snprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t kgsl_drv_histogram_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int len = 0;
	int i;

	for (i = 0; i < 16; i++)
		len += snprintf(buf + len, PAGE_SIZE - len, "%d ",
			kgsl_driver.stats.histogram[i]);

	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	return len;
}

DEVICE_ATTR(vmalloc, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(vmalloc_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(page_alloc, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(page_alloc_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(coherent, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(coherent_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(mapped, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(mapped_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(histogram, 0444, kgsl_drv_histogram_show, NULL);

static const struct device_attribute *drv_attr_list[] = {
	&dev_attr_vmalloc,
	&dev_attr_vmalloc_max,
	&dev_attr_page_alloc,
	&dev_attr_page_alloc_max,
	&dev_attr_coherent,
	&dev_attr_coherent_max,
	&dev_attr_mapped,
	&dev_attr_mapped_max,
	&dev_attr_histogram,
	NULL
};

void
kgsl_sharedmem_uninit_sysfs(void)
{
	kgsl_remove_device_sysfs_files(&kgsl_driver.virtdev, drv_attr_list);
}

int
kgsl_sharedmem_init_sysfs(void)
{
	return kgsl_create_device_sysfs_files(&kgsl_driver.virtdev,
		drv_attr_list);
}

#ifdef CONFIG_OUTER_CACHE
static void _outer_cache_range_op(int op, unsigned long addr, size_t size)
{
	switch (op) {
	case KGSL_CACHE_OP_FLUSH:
		outer_flush_range(addr, addr + size);
		break;
	case KGSL_CACHE_OP_CLEAN:
		outer_clean_range(addr, addr + size);
		break;
	case KGSL_CACHE_OP_INV:
		outer_inv_range(addr, addr + size);
		break;
	}
}

static void outer_cache_range_op_sg(struct scatterlist *sg, int sglen, int op)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, sglen, i) {
		unsigned int paddr = kgsl_get_sg_pa(s);
		_outer_cache_range_op(op, paddr, s->length);
	}
}

#else
static void outer_cache_range_op_sg(struct scatterlist *sg, int sglen, int op)
{
}
#endif

static int kgsl_page_alloc_vmfault(struct kgsl_memdesc *memdesc,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	int i, pgoff;
	struct scatterlist *s = memdesc->sg;
	unsigned int offset;

	offset = ((unsigned long) vmf->virtual_address - vma->vm_start);

	if (offset >= memdesc->size)
		return VM_FAULT_SIGBUS;

	pgoff = offset >> PAGE_SHIFT;

	/*
	 * The sglist might be made up of mixed blocks of memory depending
	 * on how many 64K pages were allocated. This means we have to do
	 * math to find the actual 4K page to map in user space.
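	 *
	 * For example (hypothetical layout): if the first sg entry is a
	 * 64K chunk it covers npages = 16, so a fault at pgoff 17 skips
	 * it and continues the walk with pgoff 1 in the entries after it.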
	 */

	for (i = 0; i < memdesc->sglen; i++) {
		int npages = s->length >> PAGE_SHIFT;

		if (pgoff < npages) {
			struct page *page = sg_page(s);

			page = nth_page(page, pgoff);

			get_page(page);
			vmf->page = page;

			return 0;
		}

		pgoff -= npages;
		s = sg_next(s);
	}

	return VM_FAULT_SIGBUS;
}

static int kgsl_page_alloc_vmflags(struct kgsl_memdesc *memdesc)
{
	return VM_RESERVED | VM_DONTEXPAND;
}

static void kgsl_page_alloc_free(struct kgsl_memdesc *memdesc)
{
	int i = 0;
	struct scatterlist *sg;
	int sglen = memdesc->sglen;

	/* Don't free the guard page if it was used */
	if (memdesc->priv & KGSL_MEMDESC_GUARD_PAGE)
		sglen--;

	kgsl_driver.stats.page_alloc -= memdesc->size;

	if (memdesc->hostptr) {
		vunmap(memdesc->hostptr);
		kgsl_driver.stats.vmalloc -= memdesc->size;
	}

	if (memdesc->sg)
		for_each_sg(memdesc->sg, sg, sglen, i) {
			if (sg->length == 0)
				break;
			__free_pages(sg_page(sg), get_order(sg->length));
		}
}

static int kgsl_contiguous_vmflags(struct kgsl_memdesc *memdesc)
{
	return VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
}

/*
 * kgsl_page_alloc_map_kernel - Map the memory in memdesc to kernel address
 * space
 *
 * @memdesc - The memory descriptor which contains information about the memory
 *
 * Return: 0 on success else error code
 */
static int kgsl_page_alloc_map_kernel(struct kgsl_memdesc *memdesc)
{
	if (!memdesc->hostptr) {
		pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);
		struct page **pages = NULL;
		struct scatterlist *sg;
		int npages = PAGE_ALIGN(memdesc->size) >> PAGE_SHIFT;
		int sglen = memdesc->sglen;
		int i, count = 0;

		/* Don't map the guard page if it exists */
		if (memdesc->priv & KGSL_MEMDESC_GUARD_PAGE)
			sglen--;

		/* create a list of pages to call vmap */
		pages = vmalloc(npages * sizeof(struct page *));
		if (!pages) {
			KGSL_CORE_ERR("vmalloc(%zu) failed\n",
				npages * sizeof(struct page *));
			return -ENOMEM;
		}

		for_each_sg(memdesc->sg, sg, sglen, i) {
			struct page *page = sg_page(sg);
			int j;

			for (j = 0; j < sg->length >> PAGE_SHIFT; j++)
				pages[count++] = page++;
		}

		memdesc->hostptr = vmap(pages, count,
			VM_IOREMAP, page_prot);

		/* Only count the mapping in the stats if it succeeded */
		if (memdesc->hostptr)
			KGSL_STATS_ADD(memdesc->size,
				kgsl_driver.stats.vmalloc,
				kgsl_driver.stats.vmalloc_max);
		vfree(pages);
	}
	if (!memdesc->hostptr)
		return -ENOMEM;

	return 0;
}

static int kgsl_contiguous_vmfault(struct kgsl_memdesc *memdesc,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	unsigned long offset, pfn;
	int ret;

	offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	pfn = (memdesc->physaddr >> PAGE_SHIFT) + offset;
	ret = vm_insert_pfn(vma, (unsigned long) vmf->virtual_address, pfn);

	if (ret == -ENOMEM || ret == -EAGAIN)
		return VM_FAULT_OOM;
	else if (ret == -EFAULT)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static void kgsl_ebimem_free(struct kgsl_memdesc *memdesc)
{
	kgsl_driver.stats.coherent -= memdesc->size;
	if (memdesc->hostptr)
		iounmap(memdesc->hostptr);

	free_contiguous_memory_by_paddr(memdesc->physaddr);
}

static int kgsl_ebimem_map_kernel(struct kgsl_memdesc *memdesc)
{
	if (!memdesc->hostptr) {
		memdesc->hostptr = ioremap(memdesc->physaddr, memdesc->size);
		if (!memdesc->hostptr) {
			KGSL_CORE_ERR("ioremap failed, paddr:0x%lx, size:0x%x\n",
				(unsigned long) memdesc->physaddr,
				memdesc->size);
			return -ENOMEM;
		}
	}

	return 0;
}

static void kgsl_coherent_free(struct kgsl_memdesc *memdesc)
{
	kgsl_driver.stats.coherent -= memdesc->size;
	dma_free_coherent(NULL, memdesc->size,
		memdesc->hostptr, memdesc->physaddr);
}

/* Global - also used by kgsl_drm.c */
struct kgsl_memdesc_ops kgsl_page_alloc_ops = {
	.free = kgsl_page_alloc_free,
	.vmflags = kgsl_page_alloc_vmflags,
	.vmfault = kgsl_page_alloc_vmfault,
	.map_kernel_mem = kgsl_page_alloc_map_kernel,
};
EXPORT_SYMBOL(kgsl_page_alloc_ops);

static struct kgsl_memdesc_ops kgsl_ebimem_ops = {
	.free = kgsl_ebimem_free,
	.vmflags = kgsl_contiguous_vmflags,
	.vmfault = kgsl_contiguous_vmfault,
	.map_kernel_mem = kgsl_ebimem_map_kernel,
};

static struct kgsl_memdesc_ops kgsl_coherent_ops = {
	.free = kgsl_coherent_free,
};

void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op)
{
	/*
	 * If the buffer is mapped in the kernel operate on that address
	 * otherwise use the user address
	 */

	void *addr = (memdesc->hostptr) ?
		memdesc->hostptr : (void *) memdesc->useraddr;

	int size = memdesc->size;

	if (addr != NULL) {
		switch (op) {
		case KGSL_CACHE_OP_FLUSH:
			dmac_flush_range(addr, addr + size);
			break;
		case KGSL_CACHE_OP_CLEAN:
			dmac_clean_range(addr, addr + size);
			break;
		case KGSL_CACHE_OP_INV:
			dmac_inv_range(addr, addr + size);
			break;
		}
	}

	outer_cache_range_op_sg(memdesc->sg, memdesc->sglen, op);
}
EXPORT_SYMBOL(kgsl_cache_range_op);

static int
_kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			size_t size)
{
	int pcount = 0, order, ret = 0;
	int j, len, page_size, sglen_alloc, sglen = 0;
	struct page **pages = NULL;
	pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);
	void *ptr;
	unsigned int align;

	align = (memdesc->flags & KGSL_MEMALIGN_MASK) >> KGSL_MEMALIGN_SHIFT;

	page_size = (align >= ilog2(SZ_64K) && size >= SZ_64K)
			? SZ_64K : PAGE_SIZE;

	/* update align flags for what we actually use */
	if (page_size != PAGE_SIZE)
		kgsl_memdesc_set_align(memdesc, ilog2(page_size));

	/*
	 * There needs to be enough room in the sg structure to be able to
	 * service the allocation entirely with PAGE_SIZE sized chunks
	 */

	sglen_alloc = PAGE_ALIGN(size) >> PAGE_SHIFT;

	/*
	 * Add guard page to the end of the allocation when the
	 * IOMMU is in use.
	 */

	if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU)
		sglen_alloc++;

	memdesc->size = size;
	memdesc->pagetable = pagetable;
	memdesc->ops = &kgsl_page_alloc_ops;

	memdesc->sglen_alloc = sglen_alloc;
	memdesc->sg = kgsl_sg_alloc(memdesc->sglen_alloc);

	if (memdesc->sg == NULL) {
		ret = -ENOMEM;
		goto done;
	}

	/*
	 * Allocate space to store the list of pages to send to vmap.
	 * This is an array of pointers so we can track 1024 pages per page of
	 * allocation, which means we can handle up to an 8MB buffer request
	 * with two pages; well within the acceptable limits for using kmalloc.
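	 *
	 * (With 4-byte pointers that is 4096 / 4 = 1024 entries per page;
	 * an 8MB buffer needs 8MB / 4K = 2048 entries, i.e. two pages.)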
	 */

	pages = kmalloc(memdesc->sglen_alloc * sizeof(struct page *),
		GFP_KERNEL);

	if (pages == NULL) {
		ret = -ENOMEM;
		goto done;
	}

	kmemleak_not_leak(memdesc->sg);

	sg_init_table(memdesc->sg, memdesc->sglen_alloc);

	len = size;

	while (len > 0) {
		struct page *page;
		unsigned int gfp_mask = __GFP_HIGHMEM;

		/* don't waste space at the end of the allocation */
		if (len < page_size)
			page_size = PAGE_SIZE;

		/*
		 * Don't do some of the more aggressive memory recovery
		 * techniques for large order allocations
		 */
		if (page_size != PAGE_SIZE)
			gfp_mask |= __GFP_COMP | __GFP_NORETRY |
				__GFP_NO_KSWAPD | __GFP_NOWARN;
		else
			gfp_mask |= GFP_KERNEL;

		page = alloc_pages(gfp_mask, get_order(page_size));

		if (page == NULL) {
			/* Retry the remainder with single pages */
			if (page_size != PAGE_SIZE) {
				page_size = PAGE_SIZE;
				continue;
			}

			KGSL_CORE_ERR(
				"Out of memory: only allocated %zuKB of %zuKB requested\n",
				(size - len) >> 10, size >> 10);

			ret = -ENOMEM;
			goto done;
		}

		for (j = 0; j < page_size >> PAGE_SHIFT; j++)
			pages[pcount++] = nth_page(page, j);

		sg_set_page(&memdesc->sg[sglen++], page, page_size, 0);
		len -= page_size;
	}

	/* Add the guard page to the end of the sglist */

	if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU) {
		/*
		 * It doesn't matter if we use GFP_ZERO here, this never
		 * gets mapped, and we only allocate it once in the life
		 * of the system
		 */

		if (kgsl_guard_page == NULL)
			kgsl_guard_page = alloc_page(GFP_KERNEL | __GFP_ZERO |
				__GFP_HIGHMEM);

		if (kgsl_guard_page != NULL) {
			sg_set_page(&memdesc->sg[sglen++], kgsl_guard_page,
				PAGE_SIZE, 0);
			memdesc->priv |= KGSL_MEMDESC_GUARD_PAGE;
		}
	}

	memdesc->sglen = sglen;

	/*
	 * All memory that goes to the user has to be zeroed out before it gets
	 * exposed to userspace. This means that the memory has to be mapped in
	 * the kernel, zeroed (memset) and then unmapped. This also means that
	 * the dcache has to be flushed to ensure coherency between the kernel
	 * and user pages. We used to pass __GFP_ZERO to alloc_page which mapped
	 * zeroed and unmapped each individual page, and then we had to turn
	 * around and call flush_dcache_page() on that page to clear the caches.
	 * This was killing us for performance. Instead, we found it is much
	 * faster to allocate the pages without GFP_ZERO, map the entire range,
	 * memset it, flush the range and then unmap - this results in a factor
	 * of 4 improvement for speed for large buffers. There is a small
	 * increase in speed for small buffers, but only on the order of a few
	 * microseconds at best. The only downside is that there needs to be
	 * enough temporary space in vmalloc to accommodate the map. This
	 * shouldn't be a problem, but if it happens, fall back to a much slower
	 * path.
	 */

	ptr = vmap(pages, pcount, VM_IOREMAP, page_prot);

	if (ptr != NULL) {
		memset(ptr, 0, memdesc->size);
		dmac_flush_range(ptr, ptr + memdesc->size);
		vunmap(ptr);
	} else {
		/* Very, very, very slow path */

		for (j = 0; j < pcount; j++) {
			ptr = kmap_atomic(pages[j]);
			memset(ptr, 0, PAGE_SIZE);
			dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
		}
	}

	outer_cache_range_op_sg(memdesc->sg, memdesc->sglen,
		KGSL_CACHE_OP_FLUSH);

	KGSL_STATS_ADD(size, kgsl_driver.stats.page_alloc,
		kgsl_driver.stats.page_alloc_max);

	order = get_order(size);

	if (order < 16)
		kgsl_driver.stats.histogram[order]++;

done:
	kfree(pages);

	if (ret)
		kgsl_sharedmem_free(memdesc);

	return ret;
}

int
kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable, size_t size)
{
	int ret = 0;

	BUG_ON(size == 0);

	size = ALIGN(size, PAGE_SIZE * 2);

	ret = _kgsl_sharedmem_page_alloc(memdesc, pagetable, size);
	if (!ret)
		ret = kgsl_page_alloc_map_kernel(memdesc);
	if (ret)
		kgsl_sharedmem_free(memdesc);
	return ret;
}
EXPORT_SYMBOL(kgsl_sharedmem_page_alloc);
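
/*
 * Illustrative use only (assumes a valid pagetable; the names are made up):
 *
 *	struct kgsl_memdesc desc = { 0 };
 *
 *	if (kgsl_sharedmem_page_alloc(&desc, pagetable, SZ_64K) == 0) {
 *		memset(desc.hostptr, 0, desc.size);
 *		kgsl_sharedmem_free(&desc);
 *	}
 *
 * On success the buffer is already zeroed and mapped in the kernel at
 * desc.hostptr; GPU mapping is done separately by the MMU code.
 */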

int
kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			size_t size)
{
	return _kgsl_sharedmem_page_alloc(memdesc, pagetable,
		PAGE_ALIGN(size));
}
EXPORT_SYMBOL(kgsl_sharedmem_page_alloc_user);

int
kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size)
{
	int result = 0;

	size = ALIGN(size, PAGE_SIZE);

	memdesc->size = size;
	memdesc->ops = &kgsl_coherent_ops;

	memdesc->hostptr = dma_alloc_coherent(NULL, size, &memdesc->physaddr,
		GFP_KERNEL);
	if (memdesc->hostptr == NULL) {
		KGSL_CORE_ERR("dma_alloc_coherent(%zu) failed\n", size);
		result = -ENOMEM;
		goto err;
	}

	result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);
	if (result)
		goto err;

	/* Record statistics */

	KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
		kgsl_driver.stats.coherent_max);

err:
	if (result)
		kgsl_sharedmem_free(memdesc);

	return result;
}
EXPORT_SYMBOL(kgsl_sharedmem_alloc_coherent);

void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
{
	if (memdesc == NULL || memdesc->size == 0)
		return;

	if (memdesc->gpuaddr)
		kgsl_mmu_unmap(memdesc->pagetable, memdesc);

	if (memdesc->ops && memdesc->ops->free)
		memdesc->ops->free(memdesc);

	kgsl_sg_free(memdesc->sg, memdesc->sglen_alloc);

	memset(memdesc, 0, sizeof(*memdesc));
}
EXPORT_SYMBOL(kgsl_sharedmem_free);

static int
_kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
		struct kgsl_pagetable *pagetable, size_t size)
{
	int result = 0;

	memdesc->size = size;
	memdesc->pagetable = pagetable;
	memdesc->ops = &kgsl_ebimem_ops;
	memdesc->physaddr = allocate_contiguous_ebi_nomap(size, SZ_8K);

	if (memdesc->physaddr == 0) {
		KGSL_CORE_ERR("allocate_contiguous_ebi_nomap(%zu) failed\n",
			size);
		return -ENOMEM;
	}

	result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);

	if (result)
		goto err;

	KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
		kgsl_driver.stats.coherent_max);

err:
	if (result)
		kgsl_sharedmem_free(memdesc);

	return result;
}

int
kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			size_t size)
{
	size = ALIGN(size, PAGE_SIZE);
	return _kgsl_sharedmem_ebimem(memdesc, pagetable, size);
}
EXPORT_SYMBOL(kgsl_sharedmem_ebimem_user);

int
kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
		struct kgsl_pagetable *pagetable, size_t size)
{
	int result;

	size = ALIGN(size, SZ_8K);
	result = _kgsl_sharedmem_ebimem(memdesc, pagetable, size);

	if (result)
		return result;

	memdesc->hostptr = ioremap(memdesc->physaddr, size);

	if (memdesc->hostptr == NULL) {
		KGSL_CORE_ERR("ioremap failed\n");
		kgsl_sharedmem_free(memdesc);
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_ebimem);

int
kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
			uint32_t *dst,
			unsigned int offsetbytes)
{
	uint32_t *src;

	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL || dst == NULL);
	WARN_ON(offsetbytes % sizeof(uint32_t) != 0);
	if (offsetbytes % sizeof(uint32_t) != 0)
		return -EINVAL;

	WARN_ON(offsetbytes + sizeof(uint32_t) > memdesc->size);
	if (offsetbytes + sizeof(uint32_t) > memdesc->size)
		return -ERANGE;

	src = (uint32_t *)(memdesc->hostptr + offsetbytes);
	*dst = *src;
	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_readl);

int
kgsl_sharedmem_writel(const struct kgsl_memdesc *memdesc,
			unsigned int offsetbytes,
			uint32_t src)
{
	uint32_t *dst;

	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
	WARN_ON(offsetbytes % sizeof(uint32_t) != 0);
	if (offsetbytes % sizeof(uint32_t) != 0)
		return -EINVAL;

	WARN_ON(offsetbytes + sizeof(uint32_t) > memdesc->size);
	if (offsetbytes + sizeof(uint32_t) > memdesc->size)
		return -ERANGE;

	kgsl_cffdump_setmem(memdesc->gpuaddr + offsetbytes,
		src, sizeof(uint32_t));
	dst = (uint32_t *)(memdesc->hostptr + offsetbytes);
	*dst = src;
	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_writel);

int
kgsl_sharedmem_set(const struct kgsl_memdesc *memdesc, unsigned int offsetbytes,
			unsigned int value, unsigned int sizebytes)
{
	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
	BUG_ON(offsetbytes + sizebytes > memdesc->size);

	kgsl_cffdump_setmem(memdesc->gpuaddr + offsetbytes, value,
		sizebytes);
	memset(memdesc->hostptr + offsetbytes, value, sizebytes);
	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_set);

/*
 * kgsl_sharedmem_map_vma - Map a user vma to physical memory
 *
 * @vma - The user vma to map
 * @memdesc - The memory descriptor which contains information about the
 * physical memory
 *
 * Return: 0 on success else error code
 */
int
kgsl_sharedmem_map_vma(struct vm_area_struct *vma,
		const struct kgsl_memdesc *memdesc)
{
	unsigned long addr = vma->vm_start;
	unsigned long size = vma->vm_end - vma->vm_start;
	int ret, i = 0;

	if (!memdesc->sg || (size != memdesc->size) ||
		(memdesc->sglen != (size / PAGE_SIZE)))
		return -EINVAL;

	for (; addr < vma->vm_end; addr += PAGE_SIZE, i++) {
		ret = vm_insert_page(vma, addr, sg_page(&memdesc->sg[i]));
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_map_vma);

static const char * const memtype_str[] = {
	[KGSL_MEMTYPE_OBJECTANY] = "any(0)",
	[KGSL_MEMTYPE_FRAMEBUFFER] = "framebuffer",
	[KGSL_MEMTYPE_RENDERBUFFER] = "renderbuffer",
	[KGSL_MEMTYPE_ARRAYBUFFER] = "arraybuffer",
	[KGSL_MEMTYPE_ELEMENTARRAYBUFFER] = "elementarraybuffer",
	[KGSL_MEMTYPE_VERTEXARRAYBUFFER] = "vertexarraybuffer",
	[KGSL_MEMTYPE_TEXTURE] = "texture",
	[KGSL_MEMTYPE_SURFACE] = "surface",
	[KGSL_MEMTYPE_EGL_SURFACE] = "egl_surface",
	[KGSL_MEMTYPE_GL] = "gl",
	[KGSL_MEMTYPE_CL] = "cl",
	[KGSL_MEMTYPE_CL_BUFFER_MAP] = "cl_buffer_map",
	[KGSL_MEMTYPE_CL_BUFFER_NOMAP] = "cl_buffer_nomap",
	[KGSL_MEMTYPE_CL_IMAGE_MAP] = "cl_image_map",
	[KGSL_MEMTYPE_CL_IMAGE_NOMAP] = "cl_image_nomap",
	[KGSL_MEMTYPE_CL_KERNEL_STACK] = "cl_kernel_stack",
	[KGSL_MEMTYPE_COMMAND] = "command",
	[KGSL_MEMTYPE_2D] = "2d",
	[KGSL_MEMTYPE_EGL_IMAGE] = "egl_image",
	[KGSL_MEMTYPE_EGL_SHADOW] = "egl_shadow",
	[KGSL_MEMTYPE_MULTISAMPLE] = "egl_multisample",
	/* KGSL_MEMTYPE_KERNEL handled below, to avoid huge array */
};

void kgsl_get_memory_usage(char *name, size_t name_size, unsigned int memflags)
{
	unsigned char type;

	type = (memflags & KGSL_MEMTYPE_MASK) >> KGSL_MEMTYPE_SHIFT;
	if (type == KGSL_MEMTYPE_KERNEL)
		strlcpy(name, "kernel", name_size);
	else if (type < ARRAY_SIZE(memtype_str) && memtype_str[type] != NULL)
		strlcpy(name, memtype_str[type], name_size);
	else
		snprintf(name, name_size, "unknown(%3d)", type);
}
EXPORT_SYMBOL(kgsl_get_memory_usage);