/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/memory_alloc.h>
#include <asm/cacheflush.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/highmem.h>

#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_cffdump.h"
#include "kgsl_device.h"

/* An attribute for showing per-process memory statistics */
struct kgsl_mem_entry_attribute {
	struct attribute attr;
	int memtype;
	ssize_t (*show)(struct kgsl_process_private *priv,
			int type, char *buf);
};

#define to_mem_entry_attr(a) \
container_of(a, struct kgsl_mem_entry_attribute, attr)

#define __MEM_ENTRY_ATTR(_type, _name, _show) \
{ \
	.attr = { .name = __stringify(_name), .mode = 0444 }, \
	.memtype = _type, \
	.show = _show, \
}

/*
 * A structure to hold the attributes for a particular memory type.
 * For each memory type in each process we store the current and maximum
 * memory usage and display the counts in sysfs.  This structure and
 * the following macro allow us to simplify the definition for those
 * adding new memory types
 */

struct mem_entry_stats {
	int memtype;
	struct kgsl_mem_entry_attribute attr;
	struct kgsl_mem_entry_attribute max_attr;
};


#define MEM_ENTRY_STAT(_type, _name) \
{ \
	.memtype = _type, \
	.attr = __MEM_ENTRY_ATTR(_type, _name, mem_entry_show), \
	.max_attr = __MEM_ENTRY_ATTR(_type, _name##_max, \
		mem_entry_max_show), \
}
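
/*
 * For illustration: MEM_ENTRY_STAT(KGSL_MEM_ENTRY_KERNEL, kernel) builds
 * one stats record whose two read-only sysfs attributes, "kernel" and
 * "kernel_max", report the current and peak usage for that memory type
 * via mem_entry_show() and mem_entry_max_show().
 */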

/*
 * One page allocation for a guard region to protect against over-zealous
 * GPU pre-fetch
 */

static struct page *kgsl_guard_page;

/**
 * Given a kobj, find the process structure attached to it
 */

static struct kgsl_process_private *
_get_priv_from_kobj(struct kobject *kobj)
{
	struct kgsl_process_private *private;
	unsigned long name;

	if (!kobj)
		return NULL;

	if (sscanf(kobj->name, "%ld", &name) != 1)
		return NULL;

	list_for_each_entry(private, &kgsl_driver.process_list, list) {
		if (private->pid == name)
			return private;
	}

	return NULL;
}

/**
 * Show the current amount of memory allocated for the given memtype
 */

static ssize_t
mem_entry_show(struct kgsl_process_private *priv, int type, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", priv->stats[type].cur);
}

/**
 * Show the maximum memory allocated for the given memtype through the life of
 * the process
 */

static ssize_t
mem_entry_max_show(struct kgsl_process_private *priv, int type, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", priv->stats[type].max);
}


static void mem_entry_sysfs_release(struct kobject *kobj)
{
}

static ssize_t mem_entry_sysfs_show(struct kobject *kobj,
	struct attribute *attr, char *buf)
{
	struct kgsl_mem_entry_attribute *pattr = to_mem_entry_attr(attr);
	struct kgsl_process_private *priv;
	ssize_t ret;

	mutex_lock(&kgsl_driver.process_mutex);
	priv = _get_priv_from_kobj(kobj);

	if (priv && pattr->show)
		ret = pattr->show(priv, pattr->memtype, buf);
	else
		ret = -EIO;

	mutex_unlock(&kgsl_driver.process_mutex);
	return ret;
}

static const struct sysfs_ops mem_entry_sysfs_ops = {
	.show = mem_entry_sysfs_show,
};

static struct kobj_type ktype_mem_entry = {
	.sysfs_ops = &mem_entry_sysfs_ops,
	.default_attrs = NULL,
	.release = mem_entry_sysfs_release
};

static struct mem_entry_stats mem_stats[] = {
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_KERNEL, kernel),
#ifdef CONFIG_ANDROID_PMEM
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_PMEM, pmem),
#endif
#ifdef CONFIG_ASHMEM
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_ASHMEM, ashmem),
#endif
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_USER, user),
#ifdef CONFIG_ION
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_ION, ion),
#endif
};

void
kgsl_process_uninit_sysfs(struct kgsl_process_private *private)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mem_stats); i++) {
		sysfs_remove_file(&private->kobj, &mem_stats[i].attr.attr);
		sysfs_remove_file(&private->kobj,
			&mem_stats[i].max_attr.attr);
	}

	kobject_put(&private->kobj);
}

void
kgsl_process_init_sysfs(struct kgsl_process_private *private)
{
	unsigned char name[16];
	int i, ret;

	snprintf(name, sizeof(name), "%d", private->pid);

	if (kobject_init_and_add(&private->kobj, &ktype_mem_entry,
		kgsl_driver.prockobj, name))
		return;

	for (i = 0; i < ARRAY_SIZE(mem_stats); i++) {
		/* We need to check the value of sysfs_create_file, but we
		 * don't really care if it passed or not */

		ret = sysfs_create_file(&private->kobj,
			&mem_stats[i].attr.attr);
		ret = sysfs_create_file(&private->kobj,
			&mem_stats[i].max_attr.attr);
	}
}
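
/*
 * For illustration: after kgsl_process_init_sysfs() runs for pid 1234,
 * a kobject named "1234" hangs off kgsl_driver.prockobj and carries one
 * <name> and one <name>_max file for every mem_stats[] entry, e.g.
 * "kernel" and "kernel_max".
 */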

static int kgsl_drv_memstat_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	unsigned int val = 0;

	/*
	 * Test the _max names first: each plain name is a prefix of its
	 * _max variant, so the strncmp() checks would otherwise shadow
	 * the _max attributes.
	 */
	if (!strncmp(attr->attr.name, "vmalloc_max", 11))
		val = kgsl_driver.stats.vmalloc_max;
	else if (!strncmp(attr->attr.name, "vmalloc", 7))
		val = kgsl_driver.stats.vmalloc;
	else if (!strncmp(attr->attr.name, "page_alloc_max", 14))
		val = kgsl_driver.stats.page_alloc_max;
	else if (!strncmp(attr->attr.name, "page_alloc", 10))
		val = kgsl_driver.stats.page_alloc;
	else if (!strncmp(attr->attr.name, "coherent_max", 12))
		val = kgsl_driver.stats.coherent_max;
	else if (!strncmp(attr->attr.name, "coherent", 8))
		val = kgsl_driver.stats.coherent;
	else if (!strncmp(attr->attr.name, "mapped_max", 10))
		val = kgsl_driver.stats.mapped_max;
	else if (!strncmp(attr->attr.name, "mapped", 6))
		val = kgsl_driver.stats.mapped;

	return snprintf(buf, PAGE_SIZE, "%u\n", val);
}

static int kgsl_drv_histogram_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	int len = 0;
	int i;

	for (i = 0; i < 16; i++)
		len += snprintf(buf + len, PAGE_SIZE - len, "%d ",
			kgsl_driver.stats.histogram[i]);

	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	return len;
}
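
/*
 * For illustration: bucket i of the histogram counts allocations whose
 * get_order(size) equals i, so with 4K pages a 16KB request lands in
 * bucket 2 and a 1MB request in bucket 8.
 */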

DEVICE_ATTR(vmalloc, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(vmalloc_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(page_alloc, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(page_alloc_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(coherent, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(coherent_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(mapped, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(mapped_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(histogram, 0444, kgsl_drv_histogram_show, NULL);

static const struct device_attribute *drv_attr_list[] = {
	&dev_attr_vmalloc,
	&dev_attr_vmalloc_max,
	&dev_attr_page_alloc,
	&dev_attr_page_alloc_max,
	&dev_attr_coherent,
	&dev_attr_coherent_max,
	&dev_attr_mapped,
	&dev_attr_mapped_max,
	&dev_attr_histogram,
	NULL
};

void
kgsl_sharedmem_uninit_sysfs(void)
{
	kgsl_remove_device_sysfs_files(&kgsl_driver.virtdev, drv_attr_list);
}

int
kgsl_sharedmem_init_sysfs(void)
{
	return kgsl_create_device_sysfs_files(&kgsl_driver.virtdev,
		drv_attr_list);
}

#ifdef CONFIG_OUTER_CACHE
static void _outer_cache_range_op(int op, unsigned long addr, size_t size)
{
	switch (op) {
	case KGSL_CACHE_OP_FLUSH:
		outer_flush_range(addr, addr + size);
		break;
	case KGSL_CACHE_OP_CLEAN:
		outer_clean_range(addr, addr + size);
		break;
	case KGSL_CACHE_OP_INV:
		outer_inv_range(addr, addr + size);
		break;
	}
}

static void outer_cache_range_op_sg(struct scatterlist *sg, int sglen, int op)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, sglen, i) {
		unsigned int paddr = kgsl_get_sg_pa(s);
		_outer_cache_range_op(op, paddr, s->length);
	}
}

#else
static void outer_cache_range_op_sg(struct scatterlist *sg, int sglen, int op)
{
}
#endif

static int kgsl_page_alloc_vmfault(struct kgsl_memdesc *memdesc,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	int i, pgoff;
	struct scatterlist *s = memdesc->sg;
	unsigned int offset;

	offset = ((unsigned long) vmf->virtual_address - vma->vm_start);

	if (offset >= memdesc->size)
		return VM_FAULT_SIGBUS;

	pgoff = offset >> PAGE_SHIFT;

	/*
	 * The sglist might be comprised of mixed blocks of memory depending
	 * on how many 64K pages were allocated. This means we have to do math
	 * to find the actual 4K page to map in user space
	 */
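
	/*
	 * For illustration: with two 64K sg entries and a fault at offset
	 * 68K, pgoff starts at 17; the first entry spans npages == 16, so
	 * pgoff drops to 1 and the fault resolves to the second 4K page of
	 * the second block.
	 */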

	for (i = 0; i < memdesc->sglen; i++) {
		int npages = s->length >> PAGE_SHIFT;

		if (pgoff < npages) {
			struct page *page = sg_page(s);

			page = nth_page(page, pgoff);

			get_page(page);
			vmf->page = page;

			return 0;
		}

		pgoff -= npages;
		s = sg_next(s);
	}

	return VM_FAULT_SIGBUS;
}

static int kgsl_page_alloc_vmflags(struct kgsl_memdesc *memdesc)
{
	return VM_RESERVED | VM_DONTEXPAND;
}

static void kgsl_page_alloc_free(struct kgsl_memdesc *memdesc)
{
	int i = 0;
	struct scatterlist *sg;
	int sglen = memdesc->sglen;

	/* Don't free the guard page if it was used */
	if (memdesc->priv & KGSL_MEMDESC_GUARD_PAGE)
		sglen--;

	kgsl_driver.stats.page_alloc -= memdesc->size;

	if (memdesc->hostptr) {
		vunmap(memdesc->hostptr);
		kgsl_driver.stats.vmalloc -= memdesc->size;
	}
	if (memdesc->sg)
		for_each_sg(memdesc->sg, sg, sglen, i)
			__free_pages(sg_page(sg), get_order(sg->length));
}

static int kgsl_contiguous_vmflags(struct kgsl_memdesc *memdesc)
{
	return VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
}

/*
 * kgsl_page_alloc_map_kernel - Map the memory in memdesc to kernel address
 * space
 *
 * @memdesc - The memory descriptor which contains information about the memory
 *
 * Return: 0 on success else error code
 */
static int kgsl_page_alloc_map_kernel(struct kgsl_memdesc *memdesc)
{
	if (!memdesc->hostptr) {
		pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);
		struct page **pages = NULL;
		struct scatterlist *sg;
		int npages = PAGE_ALIGN(memdesc->size) >> PAGE_SHIFT;
		int sglen = memdesc->sglen;
		int i, count = 0;

		/* Don't map the guard page if it exists */
		if (memdesc->priv & KGSL_MEMDESC_GUARD_PAGE)
			sglen--;

		/* create a list of pages to call vmap */
		pages = vmalloc(npages * sizeof(struct page *));
		if (!pages) {
			KGSL_CORE_ERR("vmalloc(%d) failed\n",
				npages * sizeof(struct page *));
			return -ENOMEM;
		}

		for_each_sg(memdesc->sg, sg, sglen, i) {
			struct page *page = sg_page(sg);
			int j;

			for (j = 0; j < sg->length >> PAGE_SHIFT; j++)
				pages[count++] = page++;
		}

		memdesc->hostptr = vmap(pages, count,
					VM_IOREMAP, page_prot);
		KGSL_STATS_ADD(memdesc->size, kgsl_driver.stats.vmalloc,
			kgsl_driver.stats.vmalloc_max);
		vfree(pages);
	}
	if (!memdesc->hostptr)
		return -ENOMEM;

	return 0;
}

static int kgsl_contiguous_vmfault(struct kgsl_memdesc *memdesc,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	unsigned long offset, pfn;
	int ret;

	offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	pfn = (memdesc->physaddr >> PAGE_SHIFT) + offset;
	ret = vm_insert_pfn(vma, (unsigned long) vmf->virtual_address, pfn);

	if (ret == -ENOMEM || ret == -EAGAIN)
		return VM_FAULT_OOM;
	else if (ret == -EFAULT)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static void kgsl_ebimem_free(struct kgsl_memdesc *memdesc)
{
	kgsl_driver.stats.coherent -= memdesc->size;
	if (memdesc->hostptr)
		iounmap(memdesc->hostptr);

	free_contiguous_memory_by_paddr(memdesc->physaddr);
}

static int kgsl_ebimem_map_kernel(struct kgsl_memdesc *memdesc)
{
	if (!memdesc->hostptr) {
		memdesc->hostptr = ioremap(memdesc->physaddr, memdesc->size);
		if (!memdesc->hostptr) {
			KGSL_CORE_ERR("ioremap failed, addr:0x%p, size:0x%x\n",
				memdesc->hostptr, memdesc->size);
			return -ENOMEM;
		}
	}

	return 0;
}

static void kgsl_coherent_free(struct kgsl_memdesc *memdesc)
{
	kgsl_driver.stats.coherent -= memdesc->size;
	dma_free_coherent(NULL, memdesc->size,
			  memdesc->hostptr, memdesc->physaddr);
}

/* Global - also used by kgsl_drm.c */
struct kgsl_memdesc_ops kgsl_page_alloc_ops = {
	.free = kgsl_page_alloc_free,
	.vmflags = kgsl_page_alloc_vmflags,
	.vmfault = kgsl_page_alloc_vmfault,
	.map_kernel_mem = kgsl_page_alloc_map_kernel,
};
EXPORT_SYMBOL(kgsl_page_alloc_ops);

static struct kgsl_memdesc_ops kgsl_ebimem_ops = {
	.free = kgsl_ebimem_free,
	.vmflags = kgsl_contiguous_vmflags,
	.vmfault = kgsl_contiguous_vmfault,
	.map_kernel_mem = kgsl_ebimem_map_kernel,
};

static struct kgsl_memdesc_ops kgsl_coherent_ops = {
	.free = kgsl_coherent_free,
};

void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op)
{
	void *addr = memdesc->hostptr;
	int size = memdesc->size;

	switch (op) {
	case KGSL_CACHE_OP_FLUSH:
		dmac_flush_range(addr, addr + size);
		break;
	case KGSL_CACHE_OP_CLEAN:
		dmac_clean_range(addr, addr + size);
		break;
	case KGSL_CACHE_OP_INV:
		dmac_inv_range(addr, addr + size);
		break;
	}

	outer_cache_range_op_sg(memdesc->sg, memdesc->sglen, op);
}
EXPORT_SYMBOL(kgsl_cache_range_op);

static int
_kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			size_t size, unsigned int protflags)
{
	int pcount = 0, order, ret = 0;
	int j, len, page_size, sglen_alloc, sglen = 0;
	struct page **pages = NULL;
	pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);
	void *ptr;
	struct sysinfo si;
	unsigned int align;

	/*
	 * Get the current memory information to be used in deciding if we
	 * should go ahead with this allocation
	 */

	si_meminfo(&si);

	/*
	 * Limit the size of the allocation to the amount of free memory minus
	 * 32MB. Why 32MB? Because that's the buffer that page_alloc uses, and
	 * it just seems like a reasonable limit that won't make the OOM killer
	 * go all serial on us. Of course, if we are down this low all bets
	 * are off, but above all do no harm.
	 */

	if (size >= ((si.freeram << PAGE_SHIFT) - SZ_32M))
		return -ENOMEM;

	align = (memdesc->flags & KGSL_MEMALIGN_MASK) >> KGSL_MEMALIGN_SHIFT;

	page_size = (align >= ilog2(SZ_64K) && size >= SZ_64K)
			? SZ_64K : PAGE_SIZE;
	/* update align flags for what we actually use */
	kgsl_memdesc_set_align(memdesc, ilog2(page_size));

	/*
	 * There needs to be enough room in the sg structure to be able to
	 * service the allocation entirely with PAGE_SIZE sized chunks
	 */

	sglen_alloc = PAGE_ALIGN(size) >> PAGE_SHIFT;

	/*
	 * Add a guard page to the end of the allocation when the
	 * IOMMU is in use.
	 */

	if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU)
		sglen_alloc++;

	memdesc->size = size;
	memdesc->pagetable = pagetable;
	memdesc->ops = &kgsl_page_alloc_ops;

	memdesc->sg = kgsl_sg_alloc(sglen_alloc);

	if (memdesc->sg == NULL) {
		KGSL_CORE_ERR("vmalloc(%d) failed\n",
			sglen_alloc * sizeof(struct scatterlist));
		ret = -ENOMEM;
		goto done;
	}

	/*
	 * Allocate space to store the list of pages to send to vmap.
	 * This is an array of pointers, so we can track 1024 pages per page
	 * of allocation, which means we can handle up to an 8MB buffer
	 * request with two pages; well within the acceptable limits for
	 * using kmalloc.
	 */
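
	/*
	 * For illustration: with 4K pages and 4-byte pointers, one page of
	 * pointer storage tracks 1024 pages (4MB of buffer), so an 8MB
	 * request needs 2048 pointers - 8KB, or two pages, of kmalloc'd
	 * storage.
	 */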

	pages = kmalloc(sglen_alloc * sizeof(struct page *), GFP_KERNEL);

	if (pages == NULL) {
		KGSL_CORE_ERR("kmalloc (%d) failed\n",
			sglen_alloc * sizeof(struct page *));
		ret = -ENOMEM;
		goto done;
	}

	kmemleak_not_leak(memdesc->sg);

	memdesc->sglen_alloc = sglen_alloc;
	sg_init_table(memdesc->sg, sglen_alloc);

	len = size;

	while (len > 0) {
		struct page *page;
		unsigned int gfp_mask = GFP_KERNEL | __GFP_HIGHMEM |
			__GFP_NOWARN;
		int j;

		/* don't waste space at the end of the allocation */
		if (len < page_size)
			page_size = PAGE_SIZE;

		if (page_size != PAGE_SIZE)
			gfp_mask |= __GFP_COMP;

		page = alloc_pages(gfp_mask, get_order(page_size));

		if (page == NULL) {
			/* Retry a failed 64K block with 4K pages */
			if (page_size != PAGE_SIZE) {
				page_size = PAGE_SIZE;
				continue;
			}

			/*
			 * Record the sg entries filled so far so the cleanup
			 * path can free them, then bail out rather than pass
			 * a NULL page to sg_set_page() below.
			 */
			memdesc->sglen = sglen;
			ret = -ENOMEM;
			goto done;
		}

		for (j = 0; j < page_size >> PAGE_SHIFT; j++)
			pages[pcount++] = nth_page(page, j);

		sg_set_page(&memdesc->sg[sglen++], page, page_size, 0);
		len -= page_size;
	}

	/* Add the guard page to the end of the sglist */

	if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU) {
		/*
		 * It doesn't matter if we use GFP_ZERO here; this never
		 * gets mapped, and we only allocate it once in the life
		 * of the system
		 */

		if (kgsl_guard_page == NULL)
			kgsl_guard_page = alloc_page(GFP_KERNEL | __GFP_ZERO |
				__GFP_HIGHMEM);

		if (kgsl_guard_page != NULL) {
			sg_set_page(&memdesc->sg[sglen++], kgsl_guard_page,
				PAGE_SIZE, 0);
			memdesc->priv |= KGSL_MEMDESC_GUARD_PAGE;
		}
	}

	memdesc->sglen = sglen;

	/*
	 * All memory that goes to the user has to be zeroed out before it gets
	 * exposed to userspace. This means that the memory has to be mapped in
	 * the kernel, zeroed (memset) and then unmapped. This also means that
	 * the dcache has to be flushed to ensure coherency between the kernel
	 * and user pages. We used to pass __GFP_ZERO to alloc_page, which
	 * mapped, zeroed and unmapped each individual page, and then we had to
	 * turn around and call flush_dcache_page() on that page to clear the
	 * caches. This was killing us for performance. Instead, we found it is
	 * much faster to allocate the pages without GFP_ZERO, map the entire
	 * range, memset it, flush the range and then unmap - this results in a
	 * factor of 4 improvement in speed for large buffers. There is a small
	 * increase in speed for small buffers, but only on the order of a few
	 * microseconds at best. The only downside is that there needs to be
	 * enough temporary space in vmalloc to accommodate the map. This
	 * shouldn't be a problem, but if it happens, fall back to a much
	 * slower path.
	 */

	ptr = vmap(pages, pcount, VM_IOREMAP, page_prot);

	if (ptr != NULL) {
		memset(ptr, 0, memdesc->size);
		dmac_flush_range(ptr, ptr + memdesc->size);
		vunmap(ptr);
	} else {
		/* Very, very, very slow path */

		for (j = 0; j < pcount; j++) {
			ptr = kmap_atomic(pages[j]);
			memset(ptr, 0, PAGE_SIZE);
			dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
		}
	}

	outer_cache_range_op_sg(memdesc->sg, memdesc->sglen,
		KGSL_CACHE_OP_FLUSH);

	ret = kgsl_mmu_map(pagetable, memdesc, protflags);

	if (ret)
		goto done;

	KGSL_STATS_ADD(size, kgsl_driver.stats.page_alloc,
		kgsl_driver.stats.page_alloc_max);

	order = get_order(size);

	if (order < 16)
		kgsl_driver.stats.histogram[order]++;

done:
	kfree(pages);

	if (ret)
		kgsl_sharedmem_free(memdesc);

	return ret;
}

int
kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
		struct kgsl_pagetable *pagetable, size_t size)
{
	int ret = 0;
	BUG_ON(size == 0);

	size = ALIGN(size, PAGE_SIZE * 2);

	ret = _kgsl_sharedmem_page_alloc(memdesc, pagetable, size,
		GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
	if (!ret)
		ret = kgsl_page_alloc_map_kernel(memdesc);
	if (ret)
		kgsl_sharedmem_free(memdesc);
	return ret;
}
EXPORT_SYMBOL(kgsl_sharedmem_page_alloc);
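
/*
 * Illustrative usage sketch (not from the original source; assumes the
 * caller already holds a valid pagetable):
 *
 *	struct kgsl_memdesc desc;
 *
 *	memset(&desc, 0, sizeof(desc));
 *	if (kgsl_sharedmem_page_alloc(&desc, pagetable, PAGE_SIZE * 2) == 0) {
 *		... use desc.hostptr (CPU side) and desc.gpuaddr (GPU side) ...
 *		kgsl_sharedmem_free(&desc);
 *	}
 */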

int
kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			size_t size)
{
	unsigned int protflags;

	if (size == 0)
		return -EINVAL;

	protflags = GSL_PT_PAGE_RV;
	if (!(memdesc->flags & KGSL_MEMFLAGS_GPUREADONLY))
		protflags |= GSL_PT_PAGE_WV;

	return _kgsl_sharedmem_page_alloc(memdesc, pagetable, size,
			protflags);
}
EXPORT_SYMBOL(kgsl_sharedmem_page_alloc_user);

int
kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size)
{
	int result = 0;

	size = ALIGN(size, PAGE_SIZE);

	memdesc->size = size;
	memdesc->ops = &kgsl_coherent_ops;

	memdesc->hostptr = dma_alloc_coherent(NULL, size, &memdesc->physaddr,
					      GFP_KERNEL);
	if (memdesc->hostptr == NULL) {
		KGSL_CORE_ERR("dma_alloc_coherent(%d) failed\n", size);
		result = -ENOMEM;
		goto err;
	}

	result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);
	if (result)
		goto err;

	/* Record statistics */

	KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
		       kgsl_driver.stats.coherent_max);

err:
	if (result)
		kgsl_sharedmem_free(memdesc);

	return result;
}
EXPORT_SYMBOL(kgsl_sharedmem_alloc_coherent);

void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
{
	if (memdesc == NULL || memdesc->size == 0)
		return;

	if (memdesc->gpuaddr)
		kgsl_mmu_unmap(memdesc->pagetable, memdesc);

	if (memdesc->ops && memdesc->ops->free)
		memdesc->ops->free(memdesc);

	kgsl_sg_free(memdesc->sg, memdesc->sglen_alloc);

	memset(memdesc, 0, sizeof(*memdesc));
}
EXPORT_SYMBOL(kgsl_sharedmem_free);

static int
_kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable, size_t size)
{
	int result = 0;

	memdesc->size = size;
	memdesc->pagetable = pagetable;
	memdesc->ops = &kgsl_ebimem_ops;
	memdesc->physaddr = allocate_contiguous_ebi_nomap(size, SZ_8K);

	if (memdesc->physaddr == 0) {
		KGSL_CORE_ERR("allocate_contiguous_ebi_nomap(%d) failed\n",
			size);
		return -ENOMEM;
	}

	result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);

	if (result)
		goto err;

	result = kgsl_mmu_map(pagetable, memdesc,
		GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);

	if (result)
		goto err;

	KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
		kgsl_driver.stats.coherent_max);

err:
	if (result)
		kgsl_sharedmem_free(memdesc);

	return result;
}

int
kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			size_t size)
{
	size = ALIGN(size, PAGE_SIZE);
	return _kgsl_sharedmem_ebimem(memdesc, pagetable, size);
}
EXPORT_SYMBOL(kgsl_sharedmem_ebimem_user);

int
kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
		struct kgsl_pagetable *pagetable, size_t size)
{
	int result;
	size = ALIGN(size, 8192);
	result = _kgsl_sharedmem_ebimem(memdesc, pagetable, size);

	if (result)
		return result;

	memdesc->hostptr = ioremap(memdesc->physaddr, size);

	if (memdesc->hostptr == NULL) {
		KGSL_CORE_ERR("ioremap failed\n");
		kgsl_sharedmem_free(memdesc);
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_ebimem);

int
kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
			uint32_t *dst,
			unsigned int offsetbytes)
{
	uint32_t *src;
	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL || dst == NULL);
	WARN_ON(offsetbytes % sizeof(uint32_t) != 0);
	if (offsetbytes % sizeof(uint32_t) != 0)
		return -EINVAL;

	WARN_ON(offsetbytes + sizeof(uint32_t) > memdesc->size);
	if (offsetbytes + sizeof(uint32_t) > memdesc->size)
		return -ERANGE;
	src = (uint32_t *)(memdesc->hostptr + offsetbytes);
	*dst = *src;
	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_readl);

int
kgsl_sharedmem_writel(const struct kgsl_memdesc *memdesc,
			unsigned int offsetbytes,
			uint32_t src)
{
	uint32_t *dst;
	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
	WARN_ON(offsetbytes % sizeof(uint32_t) != 0);
	if (offsetbytes % sizeof(uint32_t) != 0)
		return -EINVAL;

	WARN_ON(offsetbytes + sizeof(uint32_t) > memdesc->size);
	if (offsetbytes + sizeof(uint32_t) > memdesc->size)
		return -ERANGE;
	kgsl_cffdump_setmem(memdesc->gpuaddr + offsetbytes,
			    src, sizeof(uint32_t));
	dst = (uint32_t *)(memdesc->hostptr + offsetbytes);
	*dst = src;
	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_writel);

int
kgsl_sharedmem_set(const struct kgsl_memdesc *memdesc, unsigned int offsetbytes,
		unsigned int value, unsigned int sizebytes)
{
	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
	BUG_ON(offsetbytes + sizebytes > memdesc->size);

	kgsl_cffdump_setmem(memdesc->gpuaddr + offsetbytes, value,
			    sizebytes);
	memset(memdesc->hostptr + offsetbytes, value, sizebytes);
	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_set);

/*
 * kgsl_sharedmem_map_vma - Map a user vma to physical memory
 *
 * @vma - The user vma to map
 * @memdesc - The memory descriptor which contains information about the
 * physical memory
 *
 * Return: 0 on success else error code
 */
int
kgsl_sharedmem_map_vma(struct vm_area_struct *vma,
			const struct kgsl_memdesc *memdesc)
{
	unsigned long addr = vma->vm_start;
	unsigned long size = vma->vm_end - vma->vm_start;
	int ret, i = 0;

	if (!memdesc->sg || (size != memdesc->size) ||
		(memdesc->sglen != (size / PAGE_SIZE)))
		return -EINVAL;

	for (; addr < vma->vm_end; addr += PAGE_SIZE, i++) {
		ret = vm_insert_page(vma, addr, sg_page(&memdesc->sg[i]));
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_map_vma);

static const char * const memtype_str[] = {
	[KGSL_MEMTYPE_OBJECTANY] = "any(0)",
	[KGSL_MEMTYPE_FRAMEBUFFER] = "framebuffer",
	[KGSL_MEMTYPE_RENDERBUFFER] = "renderbuffer",
	[KGSL_MEMTYPE_ARRAYBUFFER] = "arraybuffer",
	[KGSL_MEMTYPE_ELEMENTARRAYBUFFER] = "elementarraybuffer",
	[KGSL_MEMTYPE_VERTEXARRAYBUFFER] = "vertexarraybuffer",
	[KGSL_MEMTYPE_TEXTURE] = "texture",
	[KGSL_MEMTYPE_SURFACE] = "surface",
	[KGSL_MEMTYPE_EGL_SURFACE] = "egl_surface",
	[KGSL_MEMTYPE_GL] = "gl",
	[KGSL_MEMTYPE_CL] = "cl",
	[KGSL_MEMTYPE_CL_BUFFER_MAP] = "cl_buffer_map",
	[KGSL_MEMTYPE_CL_BUFFER_NOMAP] = "cl_buffer_nomap",
	[KGSL_MEMTYPE_CL_IMAGE_MAP] = "cl_image_map",
	[KGSL_MEMTYPE_CL_IMAGE_NOMAP] = "cl_image_nomap",
	[KGSL_MEMTYPE_CL_KERNEL_STACK] = "cl_kernel_stack",
	[KGSL_MEMTYPE_COMMAND] = "command",
	[KGSL_MEMTYPE_2D] = "2d",
	[KGSL_MEMTYPE_EGL_IMAGE] = "egl_image",
	[KGSL_MEMTYPE_EGL_SHADOW] = "egl_shadow",
	[KGSL_MEMTYPE_MULTISAMPLE] = "egl_multisample",
	/* KGSL_MEMTYPE_KERNEL handled below, to avoid huge array */
};

void kgsl_get_memory_usage(char *name, size_t name_size, unsigned int memflags)
{
	unsigned char type;

	type = (memflags & KGSL_MEMTYPE_MASK) >> KGSL_MEMTYPE_SHIFT;
	if (type == KGSL_MEMTYPE_KERNEL)
		strlcpy(name, "kernel", name_size);
	else if (type < ARRAY_SIZE(memtype_str) && memtype_str[type] != NULL)
		strlcpy(name, memtype_str[type], name_size);
	else
		snprintf(name, name_size, "unknown(%3d)", type);
}
EXPORT_SYMBOL(kgsl_get_memory_usage);
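
/*
 * For illustration: flags encoding KGSL_MEMTYPE_TEXTURE resolve to the
 * string "texture", KGSL_MEMTYPE_KERNEL to "kernel", and any type beyond
 * the table to "unknown(<type>)".
 */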