/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/vmalloc.h>
#include <linux/memory_alloc.h>
#include <asm/cacheflush.h>

#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_cffdump.h"
#include "kgsl_device.h"

/* An attribute for showing per-process memory statistics */
struct kgsl_mem_entry_attribute {
	struct attribute attr;
	int memtype;
	ssize_t (*show)(struct kgsl_process_private *priv,
			int type, char *buf);
};

#define to_mem_entry_attr(a) \
container_of(a, struct kgsl_mem_entry_attribute, attr)

#define __MEM_ENTRY_ATTR(_type, _name, _show) \
{ \
	.attr = { .name = __stringify(_name), .mode = 0444 }, \
	.memtype = _type, \
	.show = _show, \
}

/*
 * A structure to hold the attributes for a particular memory type.
 * For each memory type in each process we store the current and maximum
 * memory usage and display the counts in sysfs. This structure and
 * the following macro allow us to simplify the definition for those
 * adding new memory types
 */

struct mem_entry_stats {
	int memtype;
	struct kgsl_mem_entry_attribute attr;
	struct kgsl_mem_entry_attribute max_attr;
};


#define MEM_ENTRY_STAT(_type, _name) \
{ \
	.memtype = _type, \
	.attr = __MEM_ENTRY_ATTR(_type, _name, mem_entry_show), \
	.max_attr = __MEM_ENTRY_ATTR(_type, _name##_max, \
		mem_entry_max_show), \
}


/**
 * Given a kobj, find the process structure attached to it
 */

static struct kgsl_process_private *
_get_priv_from_kobj(struct kobject *kobj)
{
	struct kgsl_process_private *private;
	unsigned long name;

	if (!kobj)
		return NULL;

	if (sscanf(kobj->name, "%ld", &name) != 1)
		return NULL;

	list_for_each_entry(private, &kgsl_driver.process_list, list) {
		if (private->pid == name)
			return private;
	}

	return NULL;
}

/**
 * Show the current amount of memory allocated for the given memtype
 */

static ssize_t
mem_entry_show(struct kgsl_process_private *priv, int type, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", priv->stats[type].cur);
}

/**
 * Show the maximum memory allocated for the given memtype through the life of
 * the process
 */

static ssize_t
mem_entry_max_show(struct kgsl_process_private *priv, int type, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", priv->stats[type].max);
}


static void mem_entry_sysfs_release(struct kobject *kobj)
{
}

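/*
 * sysfs show callback for the per-process memory entries. Resolve the
 * kobject back to its kgsl_process_private under process_mutex and then
 * dispatch to the attribute's own show handler.
 */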
static ssize_t mem_entry_sysfs_show(struct kobject *kobj,
	struct attribute *attr, char *buf)
{
	struct kgsl_mem_entry_attribute *pattr = to_mem_entry_attr(attr);
	struct kgsl_process_private *priv;
	ssize_t ret;

	mutex_lock(&kgsl_driver.process_mutex);
	priv = _get_priv_from_kobj(kobj);

	if (priv && pattr->show)
		ret = pattr->show(priv, pattr->memtype, buf);
	else
		ret = -EIO;

	mutex_unlock(&kgsl_driver.process_mutex);
	return ret;
}

static const struct sysfs_ops mem_entry_sysfs_ops = {
	.show = mem_entry_sysfs_show,
};

static struct kobj_type ktype_mem_entry = {
	.sysfs_ops = &mem_entry_sysfs_ops,
	.default_attrs = NULL,
	.release = mem_entry_sysfs_release
};

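/*
 * Table of the memory types exported per process. Each entry creates a
 * "<name>" and "<name>_max" file in the process sysfs directory.
 */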
static struct mem_entry_stats mem_stats[] = {
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_KERNEL, kernel),
#ifdef CONFIG_ANDROID_PMEM
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_PMEM, pmem),
#endif
#ifdef CONFIG_ASHMEM
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_ASHMEM, ashmem),
#endif
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_USER, user),
#ifdef CONFIG_ION
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_ION, ion),
#endif
};

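/*
 * Remove the per-process statistics files and drop the reference on the
 * process kobject.
 */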
void
kgsl_process_uninit_sysfs(struct kgsl_process_private *private)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mem_stats); i++) {
		sysfs_remove_file(&private->kobj, &mem_stats[i].attr.attr);
		sysfs_remove_file(&private->kobj,
			&mem_stats[i].max_attr.attr);
	}

	kobject_put(&private->kobj);
}

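/*
 * Create a kobject named after the pid under kgsl_driver.prockobj and
 * populate it with the current/maximum statistics files for each memory
 * type in mem_stats.
 */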
void
kgsl_process_init_sysfs(struct kgsl_process_private *private)
{
	unsigned char name[16];
	int i, ret;

	snprintf(name, sizeof(name), "%d", private->pid);

	if (kobject_init_and_add(&private->kobj, &ktype_mem_entry,
		kgsl_driver.prockobj, name))
		return;

	for (i = 0; i < ARRAY_SIZE(mem_stats); i++) {
		/* We need to check the value of sysfs_create_file, but we
		 * don't really care if it passed or not */

		ret = sysfs_create_file(&private->kobj,
			&mem_stats[i].attr.attr);
		ret = sysfs_create_file(&private->kobj,
			&mem_stats[i].max_attr.attr);
	}
}

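/*
 * Driver-wide statistics: report the counter selected by the attribute
 * name (vmalloc, coherent, mapped and their _max variants).
 */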
static int kgsl_drv_memstat_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	unsigned int val = 0;

	/* Match the full attribute name so that "vmalloc" does not also
	 * match "vmalloc_max" (and likewise for the other counters) */

	if (!strcmp(attr->attr.name, "vmalloc"))
		val = kgsl_driver.stats.vmalloc;
	else if (!strcmp(attr->attr.name, "vmalloc_max"))
		val = kgsl_driver.stats.vmalloc_max;
	else if (!strcmp(attr->attr.name, "coherent"))
		val = kgsl_driver.stats.coherent;
	else if (!strcmp(attr->attr.name, "coherent_max"))
		val = kgsl_driver.stats.coherent_max;
	else if (!strcmp(attr->attr.name, "mapped"))
		val = kgsl_driver.stats.mapped;
	else if (!strcmp(attr->attr.name, "mapped_max"))
		val = kgsl_driver.stats.mapped_max;

	return snprintf(buf, PAGE_SIZE, "%u\n", val);
}

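/*
 * Print the allocation-order histogram (orders 0..15) as a single
 * space-separated line.
 */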
static int kgsl_drv_histogram_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	int len = 0;
	int i;

	for (i = 0; i < 16; i++)
		len += snprintf(buf + len, PAGE_SIZE - len, "%d ",
			kgsl_driver.stats.histogram[i]);

	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	return len;
}

DEVICE_ATTR(vmalloc, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(vmalloc_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(coherent, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(coherent_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(mapped, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(mapped_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(histogram, 0444, kgsl_drv_histogram_show, NULL);

static const struct device_attribute *drv_attr_list[] = {
	&dev_attr_vmalloc,
	&dev_attr_vmalloc_max,
	&dev_attr_coherent,
	&dev_attr_coherent_max,
	&dev_attr_mapped,
	&dev_attr_mapped_max,
	&dev_attr_histogram,
	NULL
};

void
kgsl_sharedmem_uninit_sysfs(void)
{
	kgsl_remove_device_sysfs_files(&kgsl_driver.virtdev, drv_attr_list);
}

int
kgsl_sharedmem_init_sysfs(void)
{
	return kgsl_create_device_sysfs_files(&kgsl_driver.virtdev,
		drv_attr_list);
}

#ifdef CONFIG_OUTER_CACHE
static void _outer_cache_range_op(int op, unsigned long addr, size_t size)
{
	switch (op) {
	case KGSL_CACHE_OP_FLUSH:
		outer_flush_range(addr, addr + size);
		break;
	case KGSL_CACHE_OP_CLEAN:
		outer_clean_range(addr, addr + size);
		break;
	case KGSL_CACHE_OP_INV:
		outer_inv_range(addr, addr + size);
		break;
	}
}

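/*
 * Walk the scatterlist and apply the requested maintenance operation to
 * the outer (L2) cache for each segment's physical range.
 */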
static void outer_cache_range_op_sg(struct scatterlist *sg, int sglen, int op)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, sglen, i) {
		unsigned int paddr = sg_phys(s);
		_outer_cache_range_op(op, paddr, s->length);
	}
}

#else
static void outer_cache_range_op_sg(struct scatterlist *sg, int sglen, int op)
{
}
#endif

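/*
 * Fault handler for vmalloc-backed allocations: look up the backing page
 * for the faulting offset and hand it to the VM with an extra reference.
 */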
static int kgsl_vmalloc_vmfault(struct kgsl_memdesc *memdesc,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	unsigned long offset, pg;
	struct page *page;

	offset = (unsigned long) vmf->virtual_address - vma->vm_start;
	pg = (unsigned long) memdesc->hostptr + offset;

	page = vmalloc_to_page((void *) pg);
	if (page == NULL)
		return VM_FAULT_SIGBUS;

	get_page(page);

	vmf->page = page;
	return 0;
}

static int kgsl_vmalloc_vmflags(struct kgsl_memdesc *memdesc)
{
	return VM_RESERVED | VM_DONTEXPAND;
}

static void kgsl_vmalloc_free(struct kgsl_memdesc *memdesc)
{
	kgsl_driver.stats.vmalloc -= memdesc->size;
	vfree(memdesc->hostptr);
}

static int kgsl_contiguous_vmflags(struct kgsl_memdesc *memdesc)
{
	return VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
}

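/*
 * Fault handler for physically contiguous allocations: insert the PFN for
 * the faulting offset directly into the user mapping.
 */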
static int kgsl_contiguous_vmfault(struct kgsl_memdesc *memdesc,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	unsigned long offset, pfn;
	int ret;

	offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	pfn = (memdesc->physaddr >> PAGE_SHIFT) + offset;
	ret = vm_insert_pfn(vma, (unsigned long) vmf->virtual_address, pfn);

	if (ret == -ENOMEM || ret == -EAGAIN)
		return VM_FAULT_OOM;
	else if (ret == -EFAULT)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static void kgsl_ebimem_free(struct kgsl_memdesc *memdesc)
{
	kgsl_driver.stats.coherent -= memdesc->size;
	if (memdesc->hostptr)
		iounmap(memdesc->hostptr);

	free_contiguous_memory_by_paddr(memdesc->physaddr);
}

static void kgsl_coherent_free(struct kgsl_memdesc *memdesc)
{
	kgsl_driver.stats.coherent -= memdesc->size;
	dma_free_coherent(NULL, memdesc->size,
			  memdesc->hostptr, memdesc->physaddr);
}

/* Global - also used by kgsl_drm.c */
struct kgsl_memdesc_ops kgsl_vmalloc_ops = {
	.free = kgsl_vmalloc_free,
	.vmflags = kgsl_vmalloc_vmflags,
	.vmfault = kgsl_vmalloc_vmfault,
};
EXPORT_SYMBOL(kgsl_vmalloc_ops);

static struct kgsl_memdesc_ops kgsl_ebimem_ops = {
	.free = kgsl_ebimem_free,
	.vmflags = kgsl_contiguous_vmflags,
	.vmfault = kgsl_contiguous_vmfault,
};

static struct kgsl_memdesc_ops kgsl_coherent_ops = {
	.free = kgsl_coherent_free,
};

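/*
 * Perform the requested cache maintenance on a memdesc: the CPU (inner)
 * caches are handled through the host mapping, then the outer cache is
 * handled via the scatterlist.
 */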
void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op)
{
	void *addr = memdesc->hostptr;
	int size = memdesc->size;

	switch (op) {
	case KGSL_CACHE_OP_FLUSH:
		dmac_flush_range(addr, addr + size);
		break;
	case KGSL_CACHE_OP_CLEAN:
		dmac_clean_range(addr, addr + size);
		break;
	case KGSL_CACHE_OP_INV:
		dmac_inv_range(addr, addr + size);
		break;
	}

	outer_cache_range_op_sg(memdesc->sg, memdesc->sglen, op);
}
EXPORT_SYMBOL(kgsl_cache_range_op);

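/*
 * Common helper for vmalloc-backed allocations: build a scatterlist for
 * the already-allocated pages, invalidate the range and map it into the
 * GPU pagetable. On any failure the memdesc is freed before returning.
 */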
static int
_kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			void *ptr, size_t size, unsigned int protflags)
{
	int order, ret = 0;
	int sglen = PAGE_ALIGN(size) / PAGE_SIZE;
	int i;

	memdesc->size = size;
	memdesc->pagetable = pagetable;
	memdesc->priv = KGSL_MEMFLAGS_CACHED;
	memdesc->ops = &kgsl_vmalloc_ops;
	memdesc->hostptr = (void *) ptr;

	memdesc->sg = vmalloc(sglen * sizeof(struct scatterlist));
	if (memdesc->sg == NULL) {
		ret = -ENOMEM;
		goto done;
	}

	memdesc->sglen = sglen;
	sg_init_table(memdesc->sg, sglen);

	for (i = 0; i < memdesc->sglen; i++, ptr += PAGE_SIZE) {
		struct page *page = vmalloc_to_page(ptr);
		if (!page) {
			ret = -EINVAL;
			goto done;
		}
		sg_set_page(&memdesc->sg[i], page, PAGE_SIZE, 0);
	}

	kgsl_cache_range_op(memdesc, KGSL_CACHE_OP_INV);

	ret = kgsl_mmu_map(pagetable, memdesc, protflags);

	if (ret)
		goto done;

	KGSL_STATS_ADD(size, kgsl_driver.stats.vmalloc,
		kgsl_driver.stats.vmalloc_max);

	order = get_order(size);

	if (order < 16)
		kgsl_driver.stats.histogram[order]++;

done:
	if (ret)
		kgsl_sharedmem_free(memdesc);

	return ret;
}

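/*
 * Allocate kernel-mapped, vmalloc-backed GPU memory with read/write
 * permissions in the given pagetable.
 */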
int
kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
		struct kgsl_pagetable *pagetable, size_t size)
{
	void *ptr;

	BUG_ON(size == 0);

	size = ALIGN(size, PAGE_SIZE * 2);
	ptr = vmalloc(size);

	if (ptr == NULL) {
		KGSL_CORE_ERR("vmalloc(%d) failed\n", size);
		return -ENOMEM;
	}

	return _kgsl_sharedmem_vmalloc(memdesc, pagetable, ptr, size,
		GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
}
EXPORT_SYMBOL(kgsl_sharedmem_vmalloc);

int
kgsl_sharedmem_vmalloc_user(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			size_t size, int flags)
{
	void *ptr;
	unsigned int protflags;

	BUG_ON(size == 0);
	ptr = vmalloc_user(size);

	if (ptr == NULL) {
		KGSL_CORE_ERR("vmalloc_user(%d) failed: allocated=%d\n",
			size, kgsl_driver.stats.vmalloc);
		return -ENOMEM;
	}

	protflags = GSL_PT_PAGE_RV;
	if (!(flags & KGSL_MEMFLAGS_GPUREADONLY))
		protflags |= GSL_PT_PAGE_WV;

	return _kgsl_sharedmem_vmalloc(memdesc, pagetable, ptr, size,
		protflags);
}
EXPORT_SYMBOL(kgsl_sharedmem_vmalloc_user);

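/*
 * Allocate DMA-coherent memory for the GPU and build a scatterlist for
 * the single contiguous region.
 */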
int
kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size)
{
	int result = 0;

	size = ALIGN(size, PAGE_SIZE);

	memdesc->size = size;
	memdesc->ops = &kgsl_coherent_ops;

	memdesc->hostptr = dma_alloc_coherent(NULL, size, &memdesc->physaddr,
		GFP_KERNEL);
	if (memdesc->hostptr == NULL) {
		KGSL_CORE_ERR("dma_alloc_coherent(%d) failed\n", size);
		result = -ENOMEM;
		goto err;
	}

	result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);
	if (result)
		goto err;

	/* Record statistics */

	KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
		kgsl_driver.stats.coherent_max);

err:
	if (result)
		kgsl_sharedmem_free(memdesc);

	return result;
}
EXPORT_SYMBOL(kgsl_sharedmem_alloc_coherent);

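/*
 * Unmap a memdesc from the GPU, release the backing memory through its
 * ops->free callback and clear the descriptor.
 */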
void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
{
	if (memdesc == NULL || memdesc->size == 0)
		return;

	if (memdesc->gpuaddr)
		kgsl_mmu_unmap(memdesc->pagetable, memdesc);

	if (memdesc->ops && memdesc->ops->free)
		memdesc->ops->free(memdesc);

	vfree(memdesc->sg);

	memset(memdesc, 0, sizeof(*memdesc));
}
EXPORT_SYMBOL(kgsl_sharedmem_free);

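/*
 * Common helper for EBI allocations: reserve contiguous memory, build the
 * scatterlist and map it into the GPU pagetable with read/write access.
 */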
static int
_kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable, size_t size)
{
	int result = 0;

	memdesc->size = size;
	memdesc->pagetable = pagetable;
	memdesc->ops = &kgsl_ebimem_ops;
	memdesc->physaddr = allocate_contiguous_ebi_nomap(size, SZ_8K);

	if (memdesc->physaddr == 0) {
		KGSL_CORE_ERR("allocate_contiguous_ebi_nomap(%d) failed\n",
			size);
		return -ENOMEM;
	}

	result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);

	if (result)
		goto err;

	result = kgsl_mmu_map(pagetable, memdesc,
		GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);

	if (result)
		goto err;

	KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
		kgsl_driver.stats.coherent_max);

err:
	if (result)
		kgsl_sharedmem_free(memdesc);

	return result;
}

int
kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			size_t size, int flags)
{
	size = ALIGN(size, PAGE_SIZE);
	return _kgsl_sharedmem_ebimem(memdesc, pagetable, size);
}
EXPORT_SYMBOL(kgsl_sharedmem_ebimem_user);

int
kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
		struct kgsl_pagetable *pagetable, size_t size)
{
	int result;
	size = ALIGN(size, 8192);
	result = _kgsl_sharedmem_ebimem(memdesc, pagetable, size);

	if (result)
		return result;

	memdesc->hostptr = ioremap(memdesc->physaddr, size);

	if (memdesc->hostptr == NULL) {
		KGSL_CORE_ERR("ioremap failed\n");
		kgsl_sharedmem_free(memdesc);
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_ebimem);

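/*
 * Read a 32-bit value from a kernel-mapped memdesc. The offset must be
 * word aligned and within the allocation.
 */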
int
kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
			uint32_t *dst,
			unsigned int offsetbytes)
{
	uint32_t *src;
	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL || dst == NULL);
	WARN_ON(offsetbytes % sizeof(uint32_t) != 0);
	if (offsetbytes % sizeof(uint32_t) != 0)
		return -EINVAL;

	WARN_ON(offsetbytes + sizeof(uint32_t) > memdesc->size);
	if (offsetbytes + sizeof(uint32_t) > memdesc->size)
		return -ERANGE;
	src = (uint32_t *)(memdesc->hostptr + offsetbytes);
	*dst = *src;
	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_readl);

int
kgsl_sharedmem_writel(const struct kgsl_memdesc *memdesc,
			unsigned int offsetbytes,
			uint32_t src)
{
	uint32_t *dst;
	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
	WARN_ON(offsetbytes % sizeof(uint32_t) != 0);
	if (offsetbytes % sizeof(uint32_t) != 0)
		return -EINVAL;

	WARN_ON(offsetbytes + sizeof(uint32_t) > memdesc->size);
	if (offsetbytes + sizeof(uint32_t) > memdesc->size)
		return -ERANGE;
	kgsl_cffdump_setmem(memdesc->gpuaddr + offsetbytes,
		src, sizeof(uint32_t));
	dst = (uint32_t *)(memdesc->hostptr + offsetbytes);
	*dst = src;
	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_writel);

int
kgsl_sharedmem_set(const struct kgsl_memdesc *memdesc, unsigned int offsetbytes,
		unsigned int value, unsigned int sizebytes)
{
	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
	BUG_ON(offsetbytes + sizebytes > memdesc->size);

	kgsl_cffdump_setmem(memdesc->gpuaddr + offsetbytes, value,
		sizebytes);
	memset(memdesc->hostptr + offsetbytes, value, sizebytes);
	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_set);