/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/vmalloc.h>
#include <linux/memory_alloc.h>
#include <asm/cacheflush.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>

#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_cffdump.h"
#include "kgsl_device.h"

/* An attribute for showing per-process memory statistics */
struct kgsl_mem_entry_attribute {
	struct attribute attr;
	int memtype;
	ssize_t (*show)(struct kgsl_process_private *priv,
			int type, char *buf);
};

#define to_mem_entry_attr(a) \
container_of(a, struct kgsl_mem_entry_attribute, attr)

#define __MEM_ENTRY_ATTR(_type, _name, _show) \
{ \
	.attr = { .name = __stringify(_name), .mode = 0444 }, \
	.memtype = _type, \
	.show = _show, \
}

/*
 * A structure to hold the attributes for a particular memory type.
 * For each memory type in each process we store the current and maximum
 * memory usage and display the counts in sysfs. This structure and the
 * following macro simplify the definitions needed when adding a new
 * memory type.
 */

struct mem_entry_stats {
	int memtype;
	struct kgsl_mem_entry_attribute attr;
	struct kgsl_mem_entry_attribute max_attr;
};

#define MEM_ENTRY_STAT(_type, _name) \
{ \
	.memtype = _type, \
	.attr = __MEM_ENTRY_ATTR(_type, _name, mem_entry_show), \
	.max_attr = __MEM_ENTRY_ATTR(_type, _name##_max, \
		mem_entry_max_show), \
}

/**
 * Given a kobj, find the process structure attached to it
 */

static struct kgsl_process_private *
_get_priv_from_kobj(struct kobject *kobj)
{
	struct kgsl_process_private *private;
	unsigned long name;

	if (!kobj)
		return NULL;

	if (sscanf(kobj->name, "%ld", &name) != 1)
		return NULL;

	list_for_each_entry(private, &kgsl_driver.process_list, list) {
		if (private->pid == name)
			return private;
	}

	return NULL;
}

/**
 * Show the current amount of memory allocated for the given memtype
 */

static ssize_t
mem_entry_show(struct kgsl_process_private *priv, int type, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", priv->stats[type].cur);
}

/**
 * Show the maximum memory allocated for the given memtype through the life of
 * the process
 */

static ssize_t
mem_entry_max_show(struct kgsl_process_private *priv, int type, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", priv->stats[type].max);
}

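/*
 * No-op release: the kobject is embedded in struct kgsl_process_private,
 * whose lifetime is managed elsewhere in the driver, so there is nothing
 * to free when the last reference is dropped.
 */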
static void mem_entry_sysfs_release(struct kobject *kobj)
{
}

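/*
 * sysfs ->show() dispatcher: map the kobject back to its owning process
 * (under process_mutex) and invoke the attribute's show handler.
 */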
static ssize_t mem_entry_sysfs_show(struct kobject *kobj,
	struct attribute *attr, char *buf)
{
	struct kgsl_mem_entry_attribute *pattr = to_mem_entry_attr(attr);
	struct kgsl_process_private *priv;
	ssize_t ret;

	mutex_lock(&kgsl_driver.process_mutex);
	priv = _get_priv_from_kobj(kobj);

	if (priv && pattr->show)
		ret = pattr->show(priv, pattr->memtype, buf);
	else
		ret = -EIO;

	mutex_unlock(&kgsl_driver.process_mutex);
	return ret;
}

static const struct sysfs_ops mem_entry_sysfs_ops = {
	.show = mem_entry_sysfs_show,
};

static struct kobj_type ktype_mem_entry = {
	.sysfs_ops = &mem_entry_sysfs_ops,
	.default_attrs = NULL,
	.release = mem_entry_sysfs_release
};

static struct mem_entry_stats mem_stats[] = {
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_KERNEL, kernel),
#ifdef CONFIG_ANDROID_PMEM
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_PMEM, pmem),
#endif
#ifdef CONFIG_ASHMEM
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_ASHMEM, ashmem),
#endif
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_USER, user),
#ifdef CONFIG_ION
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_ION, ion),
#endif
};

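/*
 * Remove the per-process statistics files and drop the reference taken on
 * the process kobject by kgsl_process_init_sysfs().
 */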
void
kgsl_process_uninit_sysfs(struct kgsl_process_private *private)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mem_stats); i++) {
		sysfs_remove_file(&private->kobj, &mem_stats[i].attr.attr);
		sysfs_remove_file(&private->kobj,
			&mem_stats[i].max_attr.attr);
	}

	kobject_put(&private->kobj);
}

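/*
 * Create a per-process kobject (named after the pid) under
 * kgsl_driver.prockobj and populate it with the current and maximum usage
 * files for each memory type in mem_stats[].
 */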
void
kgsl_process_init_sysfs(struct kgsl_process_private *private)
{
	char name[16];
	int i, ret;

	snprintf(name, sizeof(name), "%d", private->pid);

	if (kobject_init_and_add(&private->kobj, &ktype_mem_entry,
		kgsl_driver.prockobj, name))
		return;

	for (i = 0; i < ARRAY_SIZE(mem_stats); i++) {
		/*
		 * sysfs_create_file() is __must_check, so capture the return
		 * value, but a failure to create one of these statistics
		 * files is not fatal and is deliberately ignored.
		 */
		ret = sysfs_create_file(&private->kobj,
			&mem_stats[i].attr.attr);
		ret = sysfs_create_file(&private->kobj,
			&mem_stats[i].max_attr.attr);
	}
}

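/*
 * Show one of the driver-wide memory statistics; the attribute name selects
 * which counter from kgsl_driver.stats is reported.
 */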
static ssize_t kgsl_drv_memstat_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	unsigned int val = 0;

	/*
	 * Match the attribute name exactly; a prefix match would make
	 * "vmalloc" shadow "vmalloc_max" (and likewise for the other
	 * *_max counters).
	 */
	if (!strcmp(attr->attr.name, "vmalloc"))
		val = kgsl_driver.stats.vmalloc;
	else if (!strcmp(attr->attr.name, "vmalloc_max"))
		val = kgsl_driver.stats.vmalloc_max;
	else if (!strcmp(attr->attr.name, "coherent"))
		val = kgsl_driver.stats.coherent;
	else if (!strcmp(attr->attr.name, "coherent_max"))
		val = kgsl_driver.stats.coherent_max;
	else if (!strcmp(attr->attr.name, "mapped"))
		val = kgsl_driver.stats.mapped;
	else if (!strcmp(attr->attr.name, "mapped_max"))
		val = kgsl_driver.stats.mapped_max;

	return snprintf(buf, PAGE_SIZE, "%u\n", val);
}

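/*
 * Dump the allocation-size histogram: one bucket per page-allocation order,
 * 16 buckets in all, as a single space-separated line.
 */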
static ssize_t kgsl_drv_histogram_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	int len = 0;
	int i;

	for (i = 0; i < 16; i++)
		len += snprintf(buf + len, PAGE_SIZE - len, "%d ",
			kgsl_driver.stats.histogram[i]);

	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	return len;
}

DEVICE_ATTR(vmalloc, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(vmalloc_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(coherent, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(coherent_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(mapped, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(mapped_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(histogram, 0444, kgsl_drv_histogram_show, NULL);

static const struct device_attribute *drv_attr_list[] = {
	&dev_attr_vmalloc,
	&dev_attr_vmalloc_max,
	&dev_attr_coherent,
	&dev_attr_coherent_max,
	&dev_attr_mapped,
	&dev_attr_mapped_max,
	&dev_attr_histogram,
	NULL
};

void
kgsl_sharedmem_uninit_sysfs(void)
{
	kgsl_remove_device_sysfs_files(&kgsl_driver.virtdev, drv_attr_list);
}

int
kgsl_sharedmem_init_sysfs(void)
{
	return kgsl_create_device_sysfs_files(&kgsl_driver.virtdev,
		drv_attr_list);
}

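/*
 * Outer (L2) cache maintenance. When an outer cache is present each
 * scatterlist segment is cleaned/invalidated/flushed by physical address;
 * otherwise these helpers compile away to a no-op.
 */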
#ifdef CONFIG_OUTER_CACHE
static void _outer_cache_range_op(int op, unsigned long addr, size_t size)
{
	switch (op) {
	case KGSL_CACHE_OP_FLUSH:
		outer_flush_range(addr, addr + size);
		break;
	case KGSL_CACHE_OP_CLEAN:
		outer_clean_range(addr, addr + size);
		break;
	case KGSL_CACHE_OP_INV:
		outer_inv_range(addr, addr + size);
		break;
	}
}

static void outer_cache_range_op_sg(struct scatterlist *sg, int sglen, int op)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, sglen, i) {
		unsigned int paddr = kgsl_get_sg_pa(s);
		_outer_cache_range_op(op, paddr, s->length);
	}
}

#else
static void outer_cache_range_op_sg(struct scatterlist *sg, int sglen, int op)
{
}
#endif

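/*
 * vm_ops fault handler for vmalloc-backed allocations: translate the faulting
 * user address to the backing vmalloc page and hand it back to the VM.
 */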
static int kgsl_vmalloc_vmfault(struct kgsl_memdesc *memdesc,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	unsigned long offset, pg;
	struct page *page;

	offset = (unsigned long) vmf->virtual_address - vma->vm_start;
	pg = (unsigned long) memdesc->hostptr + offset;

	page = vmalloc_to_page((void *) pg);
	if (page == NULL)
		return VM_FAULT_SIGBUS;

	get_page(page);

	vmf->page = page;
	return 0;
}

static int kgsl_vmalloc_vmflags(struct kgsl_memdesc *memdesc)
{
	return VM_RESERVED | VM_DONTEXPAND;
}

static void kgsl_vmalloc_free(struct kgsl_memdesc *memdesc)
{
	kgsl_driver.stats.vmalloc -= memdesc->size;
	vfree(memdesc->hostptr);
}

static int kgsl_contiguous_vmflags(struct kgsl_memdesc *memdesc)
{
	return VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
}

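/*
 * vm_ops fault handler for physically contiguous allocations: insert the PFN
 * for the faulting offset directly into the user mapping (VM_PFNMAP).
 */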
static int kgsl_contiguous_vmfault(struct kgsl_memdesc *memdesc,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	unsigned long offset, pfn;
	int ret;

	offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	pfn = (memdesc->physaddr >> PAGE_SHIFT) + offset;
	ret = vm_insert_pfn(vma, (unsigned long) vmf->virtual_address, pfn);

	if (ret == -ENOMEM || ret == -EAGAIN)
		return VM_FAULT_OOM;
	else if (ret == -EFAULT)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static void kgsl_ebimem_free(struct kgsl_memdesc *memdesc)
{
	kgsl_driver.stats.coherent -= memdesc->size;
	if (memdesc->hostptr)
		iounmap(memdesc->hostptr);

	free_contiguous_memory_by_paddr(memdesc->physaddr);
}

static void kgsl_coherent_free(struct kgsl_memdesc *memdesc)
{
	kgsl_driver.stats.coherent -= memdesc->size;
	dma_free_coherent(NULL, memdesc->size,
			memdesc->hostptr, memdesc->physaddr);
}

/* Global - also used by kgsl_drm.c */
struct kgsl_memdesc_ops kgsl_vmalloc_ops = {
	.free = kgsl_vmalloc_free,
	.vmflags = kgsl_vmalloc_vmflags,
	.vmfault = kgsl_vmalloc_vmfault,
};
EXPORT_SYMBOL(kgsl_vmalloc_ops);

static struct kgsl_memdesc_ops kgsl_ebimem_ops = {
	.free = kgsl_ebimem_free,
	.vmflags = kgsl_contiguous_vmflags,
	.vmfault = kgsl_contiguous_vmfault,
};

static struct kgsl_memdesc_ops kgsl_coherent_ops = {
	.free = kgsl_coherent_free,
};

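/*
 * Perform the requested CPU cache maintenance (flush/clean/invalidate) on the
 * kernel mapping of a buffer, then mirror the operation on the outer cache,
 * segment by segment, via the scatterlist.
 */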
void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op)
{
	void *addr = memdesc->hostptr;
	int size = memdesc->size;

	switch (op) {
	case KGSL_CACHE_OP_FLUSH:
		dmac_flush_range(addr, addr + size);
		break;
	case KGSL_CACHE_OP_CLEAN:
		dmac_clean_range(addr, addr + size);
		break;
	case KGSL_CACHE_OP_INV:
		dmac_inv_range(addr, addr + size);
		break;
	}

	outer_cache_range_op_sg(memdesc->sg, memdesc->sglen, op);
}
EXPORT_SYMBOL(kgsl_cache_range_op);

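/*
 * Common back end for the vmalloc allocators: build a scatterlist describing
 * the pages behind an existing vmalloc region, invalidate the CPU caches for
 * it and map it into the GPU pagetable with the given protection flags.
 * On any failure the partially initialised memdesc is torn down again.
 */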
static int
_kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			void *ptr, size_t size, unsigned int protflags)
{
	int order, ret = 0;
	int sglen = PAGE_ALIGN(size) / PAGE_SIZE;
	int i;

	memdesc->size = size;
	memdesc->pagetable = pagetable;
	memdesc->priv = KGSL_MEMFLAGS_CACHED;
	memdesc->ops = &kgsl_vmalloc_ops;
	memdesc->hostptr = (void *) ptr;

	memdesc->sg = vmalloc(sglen * sizeof(struct scatterlist));
	if (memdesc->sg == NULL) {
		ret = -ENOMEM;
		goto done;
	}

	kmemleak_not_leak(memdesc->sg);

	memdesc->sglen = sglen;
	sg_init_table(memdesc->sg, sglen);

	for (i = 0; i < memdesc->sglen; i++, ptr += PAGE_SIZE) {
		struct page *page = vmalloc_to_page(ptr);
		if (!page) {
			ret = -EINVAL;
			goto done;
		}
		sg_set_page(&memdesc->sg[i], page, PAGE_SIZE, 0);
	}

	kgsl_cache_range_op(memdesc, KGSL_CACHE_OP_INV);

	ret = kgsl_mmu_map(pagetable, memdesc, protflags);

	if (ret)
		goto done;

	KGSL_STATS_ADD(size, kgsl_driver.stats.vmalloc,
		kgsl_driver.stats.vmalloc_max);

	order = get_order(size);

	if (order < 16)
		kgsl_driver.stats.histogram[order]++;

done:
	if (ret)
		kgsl_sharedmem_free(memdesc);

	return ret;
}

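/*
 * Allocate GPU-visible memory backed by vmalloc pages and map it into the
 * given pagetable with read/write permission for the GPU. The size is
 * rounded up to an even number of pages.
 *
 * Illustrative use (hypothetical caller, not from this file):
 *
 *	struct kgsl_memdesc md = { 0 };
 *	if (kgsl_sharedmem_vmalloc(&md, pagetable, 4096) == 0) {
 *		... use md.hostptr / md.gpuaddr ...
 *		kgsl_sharedmem_free(&md);
 *	}
 */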
int
kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable, size_t size)
{
	void *ptr;

	BUG_ON(size == 0);

	size = ALIGN(size, PAGE_SIZE * 2);
	ptr = vmalloc(size);

	if (ptr == NULL) {
		KGSL_CORE_ERR("vmalloc(%d) failed\n", size);
		return -ENOMEM;
	}

	return _kgsl_sharedmem_vmalloc(memdesc, pagetable, ptr, size,
		GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
}
EXPORT_SYMBOL(kgsl_sharedmem_vmalloc);

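/*
 * Like kgsl_sharedmem_vmalloc(), but backed by vmalloc_user() pages (so they
 * may be mmapped by userspace) and honouring KGSL_MEMFLAGS_GPUREADONLY, which
 * drops GPU write permission from the mapping.
 */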
int
kgsl_sharedmem_vmalloc_user(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			size_t size, int flags)
{
	void *ptr;
	unsigned int protflags;

	BUG_ON(size == 0);
	ptr = vmalloc_user(size);

	if (ptr == NULL) {
		KGSL_CORE_ERR("vmalloc_user(%d) failed: allocated=%d\n",
			size, kgsl_driver.stats.vmalloc);
		return -ENOMEM;
	}

	kmemleak_not_leak(ptr);

	protflags = GSL_PT_PAGE_RV;
	if (!(flags & KGSL_MEMFLAGS_GPUREADONLY))
		protflags |= GSL_PT_PAGE_WV;

	return _kgsl_sharedmem_vmalloc(memdesc, pagetable, ptr, size,
		protflags);
}
EXPORT_SYMBOL(kgsl_sharedmem_vmalloc_user);

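/*
 * Allocate physically contiguous, cache-coherent memory with
 * dma_alloc_coherent() and describe it with a scatterlist via
 * memdesc_sg_phys(). No GPU pagetable mapping is set up here.
 */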
int
kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size)
{
	int result = 0;

	size = ALIGN(size, PAGE_SIZE);

	memdesc->size = size;
	memdesc->ops = &kgsl_coherent_ops;

	memdesc->hostptr = dma_alloc_coherent(NULL, size, &memdesc->physaddr,
					GFP_KERNEL);
	if (memdesc->hostptr == NULL) {
		KGSL_CORE_ERR("dma_alloc_coherent(%d) failed\n", size);
		result = -ENOMEM;
		goto err;
	}

	result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);
	if (result)
		goto err;

	/* Record statistics */
	KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
		kgsl_driver.stats.coherent_max);

err:
	if (result)
		kgsl_sharedmem_free(memdesc);

	return result;
}
EXPORT_SYMBOL(kgsl_sharedmem_alloc_coherent);

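/*
 * Release a shared memory descriptor: unmap it from the GPU if it was mapped,
 * invoke the allocator-specific free hook, free the scatterlist and clear the
 * descriptor so it can be safely reused.
 */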
void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
{
	if (memdesc == NULL || memdesc->size == 0)
		return;

	if (memdesc->gpuaddr)
		kgsl_mmu_unmap(memdesc->pagetable, memdesc);

	if (memdesc->ops && memdesc->ops->free)
		memdesc->ops->free(memdesc);

	vfree(memdesc->sg);

	memset(memdesc, 0, sizeof(*memdesc));
}
EXPORT_SYMBOL(kgsl_sharedmem_free);

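/*
 * Common back end for the EBI allocators: carve out physically contiguous
 * EBI memory, describe it with a scatterlist and map it into the GPU
 * pagetable read/write. On failure the descriptor is freed again.
 */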
static int
_kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable, size_t size)
{
	int result = 0;

	memdesc->size = size;
	memdesc->pagetable = pagetable;
	memdesc->ops = &kgsl_ebimem_ops;
	memdesc->physaddr = allocate_contiguous_ebi_nomap(size, SZ_8K);

	if (memdesc->physaddr == 0) {
		KGSL_CORE_ERR("allocate_contiguous_ebi_nomap(%d) failed\n",
			size);
		return -ENOMEM;
	}

	result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);

	if (result)
		goto err;

	result = kgsl_mmu_map(pagetable, memdesc,
		GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);

	if (result)
		goto err;

	KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
		kgsl_driver.stats.coherent_max);

err:
	if (result)
		kgsl_sharedmem_free(memdesc);

	return result;
}

int
kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			size_t size, int flags)
{
	size = ALIGN(size, PAGE_SIZE);
	return _kgsl_sharedmem_ebimem(memdesc, pagetable, size);
}
EXPORT_SYMBOL(kgsl_sharedmem_ebimem_user);

int
kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable, size_t size)
{
	int result;
	size = ALIGN(size, 8192);
	result = _kgsl_sharedmem_ebimem(memdesc, pagetable, size);

	if (result)
		return result;

	memdesc->hostptr = ioremap(memdesc->physaddr, size);

	if (memdesc->hostptr == NULL) {
		KGSL_CORE_ERR("ioremap failed\n");
		kgsl_sharedmem_free(memdesc);
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_ebimem);

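/*
 * Read a 32-bit word from a shared memory buffer at a byte offset. The
 * offset must be word aligned and lie completely within the buffer.
 *
 * Illustrative use (hypothetical caller, not from this file):
 *
 *	uint32_t val;
 *	if (kgsl_sharedmem_readl(memdesc, &val, 0) == 0)
 *		... val now holds the word at offset 0 ...
 */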
int
kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
			uint32_t *dst,
			unsigned int offsetbytes)
{
	uint32_t *src;
	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL || dst == NULL);
	WARN_ON(offsetbytes % sizeof(uint32_t) != 0);
	if (offsetbytes % sizeof(uint32_t) != 0)
		return -EINVAL;

	WARN_ON(offsetbytes + sizeof(uint32_t) > memdesc->size);
	if (offsetbytes + sizeof(uint32_t) > memdesc->size)
		return -ERANGE;
	src = (uint32_t *)(memdesc->hostptr + offsetbytes);
	*dst = *src;
	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_readl);

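/*
 * Write a 32-bit word into a shared memory buffer at a byte offset, mirroring
 * the write to the CFF dump stream. The offset must be word aligned and lie
 * completely within the buffer.
 */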
int
kgsl_sharedmem_writel(const struct kgsl_memdesc *memdesc,
			unsigned int offsetbytes,
			uint32_t src)
{
	uint32_t *dst;
	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
	WARN_ON(offsetbytes % sizeof(uint32_t) != 0);
	if (offsetbytes % sizeof(uint32_t) != 0)
		return -EINVAL;

	WARN_ON(offsetbytes + sizeof(uint32_t) > memdesc->size);
	if (offsetbytes + sizeof(uint32_t) > memdesc->size)
		return -ERANGE;
	kgsl_cffdump_setmem(memdesc->gpuaddr + offsetbytes,
		src, sizeof(uint32_t));
	dst = (uint32_t *)(memdesc->hostptr + offsetbytes);
	*dst = src;
	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_writel);

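/*
 * Fill a range of a shared memory buffer with a byte value (memset
 * semantics), mirroring the operation to the CFF dump stream.
 */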
int
kgsl_sharedmem_set(const struct kgsl_memdesc *memdesc, unsigned int offsetbytes,
		unsigned int value, unsigned int sizebytes)
{
	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
	BUG_ON(offsetbytes + sizebytes > memdesc->size);

	kgsl_cffdump_setmem(memdesc->gpuaddr + offsetbytes, value,
		sizebytes);
	memset(memdesc->hostptr + offsetbytes, value, sizebytes);
	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_set);