/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/memory_alloc.h>
#include <asm/cacheflush.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>

#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_cffdump.h"
#include "kgsl_device.h"

/* An attribute for showing per-process memory statistics */
struct kgsl_mem_entry_attribute {
        struct attribute attr;
        int memtype;
        ssize_t (*show)(struct kgsl_process_private *priv,
                        int type, char *buf);
};

#define to_mem_entry_attr(a) \
container_of(a, struct kgsl_mem_entry_attribute, attr)

#define __MEM_ENTRY_ATTR(_type, _name, _show) \
{ \
        .attr = { .name = __stringify(_name), .mode = 0444 }, \
        .memtype = _type, \
        .show = _show, \
}

/*
 * A structure to hold the attributes for a particular memory type.
 * For each memory type in each process we store the current and maximum
 * memory usage and display the counts in sysfs. This structure and the
 * following macro simplify the definitions when adding new memory types.
 */

struct mem_entry_stats {
        int memtype;
        struct kgsl_mem_entry_attribute attr;
        struct kgsl_mem_entry_attribute max_attr;
};


#define MEM_ENTRY_STAT(_type, _name) \
{ \
        .memtype = _type, \
        .attr = __MEM_ENTRY_ATTR(_type, _name, mem_entry_show), \
        .max_attr = __MEM_ENTRY_ATTR(_type, _name##_max, \
                mem_entry_max_show), \
}
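
/*
 * For illustration only (not compiled): following the two macros above,
 * MEM_ENTRY_STAT(KGSL_MEM_ENTRY_KERNEL, kernel) expands to roughly
 *
 *	{
 *		.memtype = KGSL_MEM_ENTRY_KERNEL,
 *		.attr = { .attr = { .name = "kernel", .mode = 0444 },
 *			  .memtype = KGSL_MEM_ENTRY_KERNEL,
 *			  .show = mem_entry_show },
 *		.max_attr = { .attr = { .name = "kernel_max", .mode = 0444 },
 *			      .memtype = KGSL_MEM_ENTRY_KERNEL,
 *			      .show = mem_entry_max_show },
 *	}
 *
 * i.e. each entry yields a read-only "<name>"/"<name>_max" file pair.
 */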

/*
 * One page allocation for a guard region to protect against over-zealous
 * GPU pre-fetch
 */

static struct page *kgsl_guard_page;

/*
 * Given a kobj, find the process structure attached to it. The caller
 * must hold kgsl_driver.process_mutex while the returned pointer is in
 * use.
 */

static struct kgsl_process_private *
_get_priv_from_kobj(struct kobject *kobj)
{
        struct kgsl_process_private *private;
        unsigned long name;

        if (!kobj)
                return NULL;

        if (sscanf(kobj->name, "%lu", &name) != 1)
                return NULL;

        list_for_each_entry(private, &kgsl_driver.process_list, list) {
                if (private->pid == name)
                        return private;
        }

        return NULL;
}

/*
 * Show the current amount of memory allocated for the given memtype
 */

static ssize_t
mem_entry_show(struct kgsl_process_private *priv, int type, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%d\n", priv->stats[type].cur);
}

/*
 * Show the maximum memory allocated for the given memtype through the
 * life of the process
 */

static ssize_t
mem_entry_max_show(struct kgsl_process_private *priv, int type, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%d\n", priv->stats[type].max);
}


static void mem_entry_sysfs_release(struct kobject *kobj)
{
}

static ssize_t mem_entry_sysfs_show(struct kobject *kobj,
        struct attribute *attr, char *buf)
{
        struct kgsl_mem_entry_attribute *pattr = to_mem_entry_attr(attr);
        struct kgsl_process_private *priv;
        ssize_t ret;

        mutex_lock(&kgsl_driver.process_mutex);
        priv = _get_priv_from_kobj(kobj);

        if (priv && pattr->show)
                ret = pattr->show(priv, pattr->memtype, buf);
        else
                ret = -EIO;

        mutex_unlock(&kgsl_driver.process_mutex);
        return ret;
}

static const struct sysfs_ops mem_entry_sysfs_ops = {
        .show = mem_entry_sysfs_show,
};

static struct kobj_type ktype_mem_entry = {
        .sysfs_ops = &mem_entry_sysfs_ops,
        .default_attrs = NULL,
        .release = mem_entry_sysfs_release
};

static struct mem_entry_stats mem_stats[] = {
        MEM_ENTRY_STAT(KGSL_MEM_ENTRY_KERNEL, kernel),
#ifdef CONFIG_ANDROID_PMEM
        MEM_ENTRY_STAT(KGSL_MEM_ENTRY_PMEM, pmem),
#endif
#ifdef CONFIG_ASHMEM
        MEM_ENTRY_STAT(KGSL_MEM_ENTRY_ASHMEM, ashmem),
#endif
        MEM_ENTRY_STAT(KGSL_MEM_ENTRY_USER, user),
#ifdef CONFIG_ION
        MEM_ENTRY_STAT(KGSL_MEM_ENTRY_ION, ion),
#endif
};

void
kgsl_process_uninit_sysfs(struct kgsl_process_private *private)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(mem_stats); i++) {
                sysfs_remove_file(&private->kobj, &mem_stats[i].attr.attr);
                sysfs_remove_file(&private->kobj,
                        &mem_stats[i].max_attr.attr);
        }

        kobject_put(&private->kobj);
}

void
kgsl_process_init_sysfs(struct kgsl_process_private *private)
{
        char name[16];
        int i, ret;

        snprintf(name, sizeof(name), "%d", private->pid);

        if (kobject_init_and_add(&private->kobj, &ktype_mem_entry,
                kgsl_driver.prockobj, name))
                return;

        for (i = 0; i < ARRAY_SIZE(mem_stats); i++) {
                /*
                 * sysfs_create_file() is __must_check, so capture the
                 * return value, but a failure here is not fatal.
                 */

                ret = sysfs_create_file(&private->kobj,
                        &mem_stats[i].attr.attr);
                ret = sysfs_create_file(&private->kobj,
                        &mem_stats[i].max_attr.attr);
        }
}
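
/*
 * The net effect is one kobject per process, named after its pid and
 * parented to kgsl_driver.prockobj, carrying a current/maximum pair of
 * read-only files per memory type; for example (path illustrative):
 *
 *	.../kgsl/proc/<pid>/kernel
 *	.../kgsl/proc/<pid>/kernel_max
 */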

static ssize_t kgsl_drv_memstat_show(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
{
        unsigned int val = 0;

        /*
         * Use exact matches: a prefix match on "vmalloc" would also
         * catch "vmalloc_max" and report the wrong statistic.
         */
        if (!strcmp(attr->attr.name, "vmalloc"))
                val = kgsl_driver.stats.vmalloc;
        else if (!strcmp(attr->attr.name, "vmalloc_max"))
                val = kgsl_driver.stats.vmalloc_max;
        else if (!strcmp(attr->attr.name, "page_alloc"))
                val = kgsl_driver.stats.page_alloc;
        else if (!strcmp(attr->attr.name, "page_alloc_max"))
                val = kgsl_driver.stats.page_alloc_max;
        else if (!strcmp(attr->attr.name, "coherent"))
                val = kgsl_driver.stats.coherent;
        else if (!strcmp(attr->attr.name, "coherent_max"))
                val = kgsl_driver.stats.coherent_max;
        else if (!strcmp(attr->attr.name, "mapped"))
                val = kgsl_driver.stats.mapped;
        else if (!strcmp(attr->attr.name, "mapped_max"))
                val = kgsl_driver.stats.mapped_max;

        return snprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t kgsl_drv_histogram_show(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        ssize_t len = 0;
        int i;

        for (i = 0; i < 16; i++)
                len += snprintf(buf + len, PAGE_SIZE - len, "%d ",
                        kgsl_driver.stats.histogram[i]);

        len += snprintf(buf + len, PAGE_SIZE - len, "\n");
        return len;
}
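
/*
 * Reading the "histogram" attribute yields 16 space-separated counters,
 * one per allocation order (2^0 .. 2^15 pages). A hypothetical sample:
 *
 *	1024 512 64 8 0 0 0 0 0 0 0 0 0 0 0 0
 */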

DEVICE_ATTR(vmalloc, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(vmalloc_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(page_alloc, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(page_alloc_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(coherent, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(coherent_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(mapped, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(mapped_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(histogram, 0444, kgsl_drv_histogram_show, NULL);

static const struct device_attribute *drv_attr_list[] = {
        &dev_attr_vmalloc,
        &dev_attr_vmalloc_max,
        &dev_attr_page_alloc,
        &dev_attr_page_alloc_max,
        &dev_attr_coherent,
        &dev_attr_coherent_max,
        &dev_attr_mapped,
        &dev_attr_mapped_max,
        &dev_attr_histogram,
        NULL
};

void
kgsl_sharedmem_uninit_sysfs(void)
{
        kgsl_remove_device_sysfs_files(&kgsl_driver.virtdev, drv_attr_list);
}

int
kgsl_sharedmem_init_sysfs(void)
{
        return kgsl_create_device_sysfs_files(&kgsl_driver.virtdev,
                drv_attr_list);
}

#ifdef CONFIG_OUTER_CACHE
static void _outer_cache_range_op(int op, unsigned long addr, size_t size)
{
        switch (op) {
        case KGSL_CACHE_OP_FLUSH:
                outer_flush_range(addr, addr + size);
                break;
        case KGSL_CACHE_OP_CLEAN:
                outer_clean_range(addr, addr + size);
                break;
        case KGSL_CACHE_OP_INV:
                outer_inv_range(addr, addr + size);
                break;
        }
}

static void outer_cache_range_op_sg(struct scatterlist *sg, int sglen, int op)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, sglen, i) {
                unsigned int paddr = kgsl_get_sg_pa(s);
                _outer_cache_range_op(op, paddr, s->length);
        }
}

#else
static void outer_cache_range_op_sg(struct scatterlist *sg, int sglen, int op)
{
}
#endif

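/*
 * kgsl_page_alloc_vmfault - fault handler for page_alloc buffers. Pages
 * are inserted into the user mapping lazily, one page per fault, rather
 * than being mapped up front at mmap() time.
 */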
static int kgsl_page_alloc_vmfault(struct kgsl_memdesc *memdesc,
                        struct vm_area_struct *vma,
                        struct vm_fault *vmf)
{
        unsigned long offset;
        struct page *page;
        int i;

        offset = (unsigned long) vmf->virtual_address - vma->vm_start;

        i = offset >> PAGE_SHIFT;
        /* Don't walk off the end of the scatterlist */
        if (i >= memdesc->sglen)
                return VM_FAULT_SIGBUS;

        page = sg_page(&memdesc->sg[i]);
        if (page == NULL)
                return VM_FAULT_SIGBUS;

        get_page(page);

        vmf->page = page;
        return 0;
}

static int kgsl_page_alloc_vmflags(struct kgsl_memdesc *memdesc)
{
        return VM_RESERVED | VM_DONTEXPAND;
}

static void kgsl_page_alloc_free(struct kgsl_memdesc *memdesc)
{
        int i = 0;
        struct scatterlist *sg;
        int sglen = memdesc->sglen;

        /* Don't free the guard page if it was used */
        if (memdesc->flags & KGSL_MEMDESC_GUARD_PAGE)
                sglen--;

        kgsl_driver.stats.page_alloc -= memdesc->size;

        if (memdesc->hostptr) {
                vunmap(memdesc->hostptr);
                kgsl_driver.stats.vmalloc -= memdesc->size;
        }
        if (memdesc->sg)
                for_each_sg(memdesc->sg, sg, sglen, i)
                        __free_page(sg_page(sg));
}

static int kgsl_contiguous_vmflags(struct kgsl_memdesc *memdesc)
{
        return VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
}

/*
 * kgsl_page_alloc_map_kernel - Map the memory in memdesc to kernel address
 * space
 *
 * @memdesc - The memory descriptor which contains information about the memory
 *
 * Return: 0 on success else error code
 */
static int kgsl_page_alloc_map_kernel(struct kgsl_memdesc *memdesc)
{
        if (!memdesc->hostptr) {
                pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);
                struct page **pages = NULL;
                struct scatterlist *sg;
                int sglen = memdesc->sglen;
                int i;

                /* Don't map the guard page if it exists */
                if (memdesc->flags & KGSL_MEMDESC_GUARD_PAGE)
                        sglen--;

                /* create a list of pages to call vmap */
                pages = kmalloc(sglen * sizeof(struct page *), GFP_KERNEL);
                if (!pages) {
                        KGSL_CORE_ERR("kmalloc(%zu) failed\n",
                                sglen * sizeof(struct page *));
                        return -ENOMEM;
                }
                for_each_sg(memdesc->sg, sg, sglen, i)
                        pages[i] = sg_page(sg);
                memdesc->hostptr = vmap(pages, sglen,
                        VM_IOREMAP, page_prot);
                /* Only account the mapping if vmap() succeeded */
                if (memdesc->hostptr)
                        KGSL_STATS_ADD(memdesc->size,
                                kgsl_driver.stats.vmalloc,
                                kgsl_driver.stats.vmalloc_max);
                kfree(pages);
        }
        if (!memdesc->hostptr)
                return -ENOMEM;

        return 0;
}

static int kgsl_contiguous_vmfault(struct kgsl_memdesc *memdesc,
                        struct vm_area_struct *vma,
                        struct vm_fault *vmf)
{
        unsigned long offset, pfn;
        int ret;

        offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >>
                PAGE_SHIFT;

        pfn = (memdesc->physaddr >> PAGE_SHIFT) + offset;
        ret = vm_insert_pfn(vma, (unsigned long) vmf->virtual_address, pfn);

        if (ret == -ENOMEM || ret == -EAGAIN)
                return VM_FAULT_OOM;
        else if (ret == -EFAULT)
                return VM_FAULT_SIGBUS;

        return VM_FAULT_NOPAGE;
}

static void kgsl_ebimem_free(struct kgsl_memdesc *memdesc)
{
        kgsl_driver.stats.coherent -= memdesc->size;
        if (memdesc->hostptr)
                iounmap(memdesc->hostptr);

        free_contiguous_memory_by_paddr(memdesc->physaddr);
}

static void kgsl_coherent_free(struct kgsl_memdesc *memdesc)
{
        kgsl_driver.stats.coherent -= memdesc->size;
        dma_free_coherent(NULL, memdesc->size,
                          memdesc->hostptr, memdesc->physaddr);
}

/* Global - also used by kgsl_drm.c */
struct kgsl_memdesc_ops kgsl_page_alloc_ops = {
        .free = kgsl_page_alloc_free,
        .vmflags = kgsl_page_alloc_vmflags,
        .vmfault = kgsl_page_alloc_vmfault,
        .map_kernel_mem = kgsl_page_alloc_map_kernel,
};
EXPORT_SYMBOL(kgsl_page_alloc_ops);

static struct kgsl_memdesc_ops kgsl_ebimem_ops = {
        .free = kgsl_ebimem_free,
        .vmflags = kgsl_contiguous_vmflags,
        .vmfault = kgsl_contiguous_vmfault,
};

static struct kgsl_memdesc_ops kgsl_coherent_ops = {
        .free = kgsl_coherent_free,
};

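/*
 * kgsl_cache_range_op - perform CPU cache maintenance on the kernel
 * mapping of a buffer, then on the outer (L2) cache if one is present.
 * CLEAN writes dirty lines back to memory, INV discards cached lines,
 * and FLUSH does both.
 */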
void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op)
{
        void *addr = memdesc->hostptr;
        int size = memdesc->size;

        switch (op) {
        case KGSL_CACHE_OP_FLUSH:
                dmac_flush_range(addr, addr + size);
                break;
        case KGSL_CACHE_OP_CLEAN:
                dmac_clean_range(addr, addr + size);
                break;
        case KGSL_CACHE_OP_INV:
                dmac_inv_range(addr, addr + size);
                break;
        }

        outer_cache_range_op_sg(memdesc->sg, memdesc->sglen, op);
}
EXPORT_SYMBOL(kgsl_cache_range_op);

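/*
 * _kgsl_sharedmem_page_alloc - back a memdesc with individually
 * allocated pages: build a scatterlist (plus an optional IOMMU guard
 * page), flush the new pages from the CPU caches, and map them into the
 * given pagetable with the requested protection flags.
 */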
static int
_kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
                        struct kgsl_pagetable *pagetable,
                        size_t size, unsigned int protflags)
{
        int order, ret = 0;
        int sglen = PAGE_ALIGN(size) / PAGE_SIZE;
        int i;

        /*
         * Add guard page to the end of the allocation when the
         * IOMMU is in use.
         */

        if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU)
                sglen++;

        memdesc->size = size;
        memdesc->pagetable = pagetable;
        memdesc->priv = KGSL_MEMFLAGS_CACHED;
        memdesc->ops = &kgsl_page_alloc_ops;

        memdesc->sg = kgsl_sg_alloc(sglen);

        if (memdesc->sg == NULL) {
                KGSL_CORE_ERR("kgsl_sg_alloc(%d) failed\n", sglen);
                ret = -ENOMEM;
                goto done;
        }

        kmemleak_not_leak(memdesc->sg);

        memdesc->sglen = sglen;
        sg_init_table(memdesc->sg, sglen);

        for (i = 0; i < PAGE_ALIGN(size) / PAGE_SIZE; i++) {
                struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO |
                        __GFP_HIGHMEM);
                if (!page) {
                        ret = -ENOMEM;
                        memdesc->sglen = i;
                        goto done;
                }
                flush_dcache_page(page);
                sg_set_page(&memdesc->sg[i], page, PAGE_SIZE, 0);
        }

        /* Add the guard page to the end of the sglist */

        if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU) {
                if (kgsl_guard_page == NULL)
                        kgsl_guard_page = alloc_page(GFP_KERNEL | __GFP_ZERO |
                                __GFP_HIGHMEM);

                if (kgsl_guard_page != NULL) {
                        sg_set_page(&memdesc->sg[sglen - 1], kgsl_guard_page,
                                PAGE_SIZE, 0);
                        memdesc->flags |= KGSL_MEMDESC_GUARD_PAGE;
                } else
                        memdesc->sglen--;
        }

        outer_cache_range_op_sg(memdesc->sg, memdesc->sglen,
                KGSL_CACHE_OP_FLUSH);

        ret = kgsl_mmu_map(pagetable, memdesc, protflags);

        if (ret)
                goto done;

        KGSL_STATS_ADD(size, kgsl_driver.stats.page_alloc,
                kgsl_driver.stats.page_alloc_max);

        order = get_order(size);

        if (order < 16)
                kgsl_driver.stats.histogram[order]++;

done:
        if (ret)
                kgsl_sharedmem_free(memdesc);

        return ret;
}

int
kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
                        struct kgsl_pagetable *pagetable, size_t size)
{
        int ret = 0;

        BUG_ON(size == 0);

        size = ALIGN(size, PAGE_SIZE * 2);

        ret = _kgsl_sharedmem_page_alloc(memdesc, pagetable, size,
                GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
        if (!ret)
                ret = kgsl_page_alloc_map_kernel(memdesc);
        if (ret)
                kgsl_sharedmem_free(memdesc);
        return ret;
}
EXPORT_SYMBOL(kgsl_sharedmem_page_alloc);
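
/*
 * Typical usage, as an illustrative sketch only ("pt" stands for a
 * previously created pagetable):
 *
 *	struct kgsl_memdesc desc;
 *
 *	memset(&desc, 0, sizeof(desc));
 *	if (kgsl_sharedmem_page_alloc(&desc, pt, 8192) == 0) {
 *		... use desc.hostptr (CPU) and desc.gpuaddr (GPU) ...
 *		kgsl_sharedmem_free(&desc);
 *	}
 */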

int
kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
                        struct kgsl_pagetable *pagetable,
                        size_t size, int flags)
{
        unsigned int protflags;

        BUG_ON(size == 0);

        protflags = GSL_PT_PAGE_RV;
        if (!(flags & KGSL_MEMFLAGS_GPUREADONLY))
                protflags |= GSL_PT_PAGE_WV;

        return _kgsl_sharedmem_page_alloc(memdesc, pagetable, size,
                protflags);
}
EXPORT_SYMBOL(kgsl_sharedmem_page_alloc_user);

int
kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size)
{
        int result = 0;

        size = ALIGN(size, PAGE_SIZE);

        memdesc->size = size;
        memdesc->ops = &kgsl_coherent_ops;

        memdesc->hostptr = dma_alloc_coherent(NULL, size, &memdesc->physaddr,
                GFP_KERNEL);
        if (memdesc->hostptr == NULL) {
                KGSL_CORE_ERR("dma_alloc_coherent(%zu) failed\n", size);
                result = -ENOMEM;
                goto err;
        }

        result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);
        if (result)
                goto err;

        /* Record statistics */

        KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
                kgsl_driver.stats.coherent_max);

err:
        if (result)
                kgsl_sharedmem_free(memdesc);

        return result;
}
EXPORT_SYMBOL(kgsl_sharedmem_alloc_coherent);

void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
{
        if (memdesc == NULL || memdesc->size == 0)
                return;

        if (memdesc->gpuaddr)
                kgsl_mmu_unmap(memdesc->pagetable, memdesc);

        if (memdesc->ops && memdesc->ops->free)
                memdesc->ops->free(memdesc);

        kgsl_sg_free(memdesc->sg, memdesc->sglen);

        memset(memdesc, 0, sizeof(*memdesc));
}
EXPORT_SYMBOL(kgsl_sharedmem_free);

static int
_kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
                        struct kgsl_pagetable *pagetable, size_t size)
{
        int result = 0;

        memdesc->size = size;
        memdesc->pagetable = pagetable;
        memdesc->ops = &kgsl_ebimem_ops;
        memdesc->physaddr = allocate_contiguous_ebi_nomap(size, SZ_8K);

        if (memdesc->physaddr == 0) {
                KGSL_CORE_ERR("allocate_contiguous_ebi_nomap(%zu) failed\n",
                        size);
                return -ENOMEM;
        }

        result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);

        if (result)
                goto err;

        result = kgsl_mmu_map(pagetable, memdesc,
                GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);

        if (result)
                goto err;

        KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
                kgsl_driver.stats.coherent_max);

err:
        if (result)
                kgsl_sharedmem_free(memdesc);

        return result;
}

int
kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc,
                        struct kgsl_pagetable *pagetable,
                        size_t size, int flags)
{
        size = ALIGN(size, PAGE_SIZE);
        return _kgsl_sharedmem_ebimem(memdesc, pagetable, size);
}
EXPORT_SYMBOL(kgsl_sharedmem_ebimem_user);

int
kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
                        struct kgsl_pagetable *pagetable, size_t size)
{
        int result;

        size = ALIGN(size, SZ_8K);
        result = _kgsl_sharedmem_ebimem(memdesc, pagetable, size);

        if (result)
                return result;

        memdesc->hostptr = ioremap(memdesc->physaddr, size);

        if (memdesc->hostptr == NULL) {
                KGSL_CORE_ERR("ioremap failed\n");
                kgsl_sharedmem_free(memdesc);
                return -ENOMEM;
        }

        return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_ebimem);

int
kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
                        uint32_t *dst,
                        unsigned int offsetbytes)
{
        uint32_t *src;

        BUG_ON(memdesc == NULL || memdesc->hostptr == NULL || dst == NULL);
        WARN_ON(offsetbytes % sizeof(uint32_t) != 0);
        if (offsetbytes % sizeof(uint32_t) != 0)
                return -EINVAL;

        WARN_ON(offsetbytes + sizeof(uint32_t) > memdesc->size);
        if (offsetbytes + sizeof(uint32_t) > memdesc->size)
                return -ERANGE;

        src = (uint32_t *)(memdesc->hostptr + offsetbytes);
        *dst = *src;
        return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_readl);

int
kgsl_sharedmem_writel(const struct kgsl_memdesc *memdesc,
                        unsigned int offsetbytes,
                        uint32_t src)
{
        uint32_t *dst;

        BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
        WARN_ON(offsetbytes % sizeof(uint32_t) != 0);
        if (offsetbytes % sizeof(uint32_t) != 0)
                return -EINVAL;

        WARN_ON(offsetbytes + sizeof(uint32_t) > memdesc->size);
        if (offsetbytes + sizeof(uint32_t) > memdesc->size)
                return -ERANGE;

        kgsl_cffdump_setmem(memdesc->gpuaddr + offsetbytes,
                src, sizeof(uint32_t));
        dst = (uint32_t *)(memdesc->hostptr + offsetbytes);
        *dst = src;
        return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_writel);
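
/*
 * Illustrative round trip through the two accessors above: write a
 * dword at a 4-byte-aligned offset of a mapped memdesc, then read it
 * back.
 *
 *	uint32_t val;
 *
 *	kgsl_sharedmem_writel(&desc, 4, 0xdeadbeef);
 *	kgsl_sharedmem_readl(&desc, &val, 4);
 *
 * Unaligned offsets fail with -EINVAL and out-of-range offsets with
 * -ERANGE; both also trigger a WARN_ON.
 */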

int
kgsl_sharedmem_set(const struct kgsl_memdesc *memdesc, unsigned int offsetbytes,
                        unsigned int value, unsigned int sizebytes)
{
        BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
        BUG_ON(offsetbytes + sizebytes > memdesc->size);

        kgsl_cffdump_setmem(memdesc->gpuaddr + offsetbytes, value,
                sizebytes);
        memset(memdesc->hostptr + offsetbytes, value, sizebytes);
        return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_set);

/*
 * kgsl_sharedmem_map_vma - Map a user vma to physical memory
 *
 * @vma - The user vma to map
 * @memdesc - The memory descriptor which contains information about the
 * physical memory
 *
 * Return: 0 on success else error code
 */
int
kgsl_sharedmem_map_vma(struct vm_area_struct *vma,
                        const struct kgsl_memdesc *memdesc)
{
        unsigned long addr = vma->vm_start;
        unsigned long size = vma->vm_end - vma->vm_start;
        int ret, i = 0;

        if (!memdesc->sg || (size != memdesc->size) ||
                (memdesc->sglen != (size / PAGE_SIZE)))
                return -EINVAL;

        for (; addr < vma->vm_end; addr += PAGE_SIZE, i++) {
                ret = vm_insert_page(vma, addr, sg_page(&memdesc->sg[i]));
                if (ret)
                        return ret;
        }
        return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_map_vma);