/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/vmalloc.h>
#include <linux/memory_alloc.h>
#include <asm/cacheflush.h>

#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_cffdump.h"
#include "kgsl_device.h"

/* An attribute for showing per-process memory statistics */
struct kgsl_mem_entry_attribute {
	struct attribute attr;
	int memtype;
	ssize_t (*show)(struct kgsl_process_private *priv,
		int type, char *buf);
};

#define to_mem_entry_attr(a) \
container_of(a, struct kgsl_mem_entry_attribute, attr)

#define __MEM_ENTRY_ATTR(_type, _name, _show) \
{ \
	.attr = { .name = __stringify(_name), .mode = 0444 }, \
	.memtype = _type, \
	.show = _show, \
}

/*
 * A structure to hold the attributes for a particular memory type.
 * For each memory type in each process we store the current and maximum
 * memory usage and display the counts in sysfs. This structure and
 * the following macro allow us to keep the definitions simple when new
 * memory types are added.
 */

struct mem_entry_stats {
	int memtype;
	struct kgsl_mem_entry_attribute attr;
	struct kgsl_mem_entry_attribute max_attr;
};


#define MEM_ENTRY_STAT(_type, _name) \
{ \
	.memtype = _type, \
	.attr = __MEM_ENTRY_ATTR(_type, _name, mem_entry_show), \
	.max_attr = __MEM_ENTRY_ATTR(_type, _name##_max, \
		mem_entry_max_show), \
}
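
/*
 * Illustrative sketch only: the KGSL_MEM_ENTRY_ION name below is a
 * hypothetical example, not part of this driver.  A new memory type
 * would only need one additional entry in the mem_stats[] table below:
 *
 *	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_ION, ion),
 *
 * which creates both the "ion" and "ion_max" per-process sysfs files.
 */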


/**
 * Given a kobj, find the process structure attached to it
 */

static struct kgsl_process_private *
_get_priv_from_kobj(struct kobject *kobj)
{
	struct kgsl_process_private *private;
	unsigned long name;

	if (!kobj)
		return NULL;

	if (sscanf(kobj->name, "%lu", &name) != 1)
		return NULL;

	list_for_each_entry(private, &kgsl_driver.process_list, list) {
		if (private->pid == name)
			return private;
	}

	return NULL;
}

/**
 * Show the current amount of memory allocated for the given memtype
 */

static ssize_t
mem_entry_show(struct kgsl_process_private *priv, int type, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", priv->stats[type].cur);
}

/**
 * Show the maximum memory allocated for the given memtype through the life of
 * the process
 */

static ssize_t
mem_entry_max_show(struct kgsl_process_private *priv, int type, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", priv->stats[type].max);
}


static void mem_entry_sysfs_release(struct kobject *kobj)
{
}

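/*
 * sysfs ->show() handler for the per-process memory attributes.  The
 * process is looked up from the kobject name under process_mutex so it
 * cannot disappear while the statistic is being read.
 */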
static ssize_t mem_entry_sysfs_show(struct kobject *kobj,
	struct attribute *attr, char *buf)
{
	struct kgsl_mem_entry_attribute *pattr = to_mem_entry_attr(attr);
	struct kgsl_process_private *priv;
	ssize_t ret;

	mutex_lock(&kgsl_driver.process_mutex);
	priv = _get_priv_from_kobj(kobj);

	if (priv && pattr->show)
		ret = pattr->show(priv, pattr->memtype, buf);
	else
		ret = -EIO;

	mutex_unlock(&kgsl_driver.process_mutex);
	return ret;
}

static const struct sysfs_ops mem_entry_sysfs_ops = {
	.show = mem_entry_sysfs_show,
};

static struct kobj_type ktype_mem_entry = {
	.sysfs_ops = &mem_entry_sysfs_ops,
	.default_attrs = NULL,
	.release = mem_entry_sysfs_release
};

static struct mem_entry_stats mem_stats[] = {
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_KERNEL, kernel),
#ifdef CONFIG_ANDROID_PMEM
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_PMEM, pmem),
#endif
#ifdef CONFIG_ASHMEM
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_ASHMEM, ashmem),
#endif
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_USER, user),
};

void
kgsl_process_uninit_sysfs(struct kgsl_process_private *private)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mem_stats); i++) {
		sysfs_remove_file(&private->kobj, &mem_stats[i].attr.attr);
		sysfs_remove_file(&private->kobj,
			&mem_stats[i].max_attr.attr);
	}

	kobject_put(&private->kobj);
}

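/*
 * Create the per-process sysfs directory.  The directory is named after
 * the pid and placed under kgsl_driver.prockobj (typically something like
 * /sys/class/kgsl/kgsl/proc/<pid>/, depending on where prockobj is
 * registered), with one current-usage and one maximum-usage file for
 * every entry in mem_stats[].
 */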
void
kgsl_process_init_sysfs(struct kgsl_process_private *private)
{
	unsigned char name[16];
	int i, ret;

	snprintf(name, sizeof(name), "%d", private->pid);

	if (kobject_init_and_add(&private->kobj, &ktype_mem_entry,
		kgsl_driver.prockobj, name))
		return;

	for (i = 0; i < ARRAY_SIZE(mem_stats); i++) {
		/* We need to check the return value of sysfs_create_file,
		 * but a failure here is not treated as fatal */

		ret = sysfs_create_file(&private->kobj,
			&mem_stats[i].attr.attr);
		ret = sysfs_create_file(&private->kobj,
			&mem_stats[i].max_attr.attr);
	}
}

static int kgsl_drv_memstat_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	unsigned int val = 0;

	/* Check the longer "_max" names first: the plain names are
	 * prefixes of them, so the order of these tests matters */
	if (!strncmp(attr->attr.name, "vmalloc_max", 11))
		val = kgsl_driver.stats.vmalloc_max;
	else if (!strncmp(attr->attr.name, "vmalloc", 7))
		val = kgsl_driver.stats.vmalloc;
	else if (!strncmp(attr->attr.name, "coherent_max", 12))
		val = kgsl_driver.stats.coherent_max;
	else if (!strncmp(attr->attr.name, "coherent", 8))
		val = kgsl_driver.stats.coherent;
	else if (!strncmp(attr->attr.name, "mapped_max", 10))
		val = kgsl_driver.stats.mapped_max;
	else if (!strncmp(attr->attr.name, "mapped", 6))
		val = kgsl_driver.stats.mapped;

	return snprintf(buf, PAGE_SIZE, "%u\n", val);
}

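/*
 * Print the allocation-size histogram: 16 space-separated counters, one
 * per allocation order (see the get_order() accounting in
 * _kgsl_sharedmem_vmalloc() below).
 */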
static int kgsl_drv_histogram_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	int len = 0;
	int i;

	for (i = 0; i < 16; i++)
		len += snprintf(buf + len, PAGE_SIZE - len, "%d ",
			kgsl_driver.stats.histogram[i]);

	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	return len;
}

DEVICE_ATTR(vmalloc, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(vmalloc_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(coherent, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(coherent_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(mapped, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(mapped_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(histogram, 0444, kgsl_drv_histogram_show, NULL);

static const struct device_attribute *drv_attr_list[] = {
	&dev_attr_vmalloc,
	&dev_attr_vmalloc_max,
	&dev_attr_coherent,
	&dev_attr_coherent_max,
	&dev_attr_mapped,
	&dev_attr_mapped_max,
	&dev_attr_histogram,
	NULL
};

void
kgsl_sharedmem_uninit_sysfs(void)
{
	kgsl_remove_device_sysfs_files(&kgsl_driver.virtdev, drv_attr_list);
}

int
kgsl_sharedmem_init_sysfs(void)
{
	return kgsl_create_device_sysfs_files(&kgsl_driver.virtdev,
		drv_attr_list);
}

#ifdef CONFIG_OUTER_CACHE
static void _outer_cache_range_op(int op, unsigned long addr, size_t size)
{
	switch (op) {
	case KGSL_CACHE_OP_FLUSH:
		outer_flush_range(addr, addr + size);
		break;
	case KGSL_CACHE_OP_CLEAN:
		outer_clean_range(addr, addr + size);
		break;
	case KGSL_CACHE_OP_INV:
		outer_inv_range(addr, addr + size);
		break;
	}
}

static void outer_cache_range_op_sg(struct scatterlist *sg, int sglen, int op)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, sglen, i) {
		unsigned int paddr = sg_phys(s);
		_outer_cache_range_op(op, paddr, s->length);
	}
}

#else
static void outer_cache_range_op_sg(struct scatterlist *sg, int sglen, int op)
{
}
#endif

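/*
 * Fault handler for user mappings of vmalloc-backed allocations: resolve
 * the faulting offset to the backing page with vmalloc_to_page(), take a
 * reference, and hand the page back to the VM.
 */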
static int kgsl_vmalloc_vmfault(struct kgsl_memdesc *memdesc,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	unsigned long offset, pg;
	struct page *page;

	offset = (unsigned long) vmf->virtual_address - vma->vm_start;
	pg = (unsigned long) memdesc->hostptr + offset;

	page = vmalloc_to_page((void *) pg);
	if (page == NULL)
		return VM_FAULT_SIGBUS;

	get_page(page);

	vmf->page = page;
	return 0;
}

static int kgsl_vmalloc_vmflags(struct kgsl_memdesc *memdesc)
{
	return VM_RESERVED | VM_DONTEXPAND;
}

static void kgsl_vmalloc_free(struct kgsl_memdesc *memdesc)
{
	kgsl_driver.stats.vmalloc -= memdesc->size;
	vfree(memdesc->hostptr);
}

static int kgsl_contiguous_vmflags(struct kgsl_memdesc *memdesc)
{
	return VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
}

static int kgsl_contiguous_vmfault(struct kgsl_memdesc *memdesc,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	unsigned long offset, pfn;
	int ret;

	offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	pfn = (memdesc->physaddr >> PAGE_SHIFT) + offset;
	ret = vm_insert_pfn(vma, (unsigned long) vmf->virtual_address, pfn);

	if (ret == -ENOMEM || ret == -EAGAIN)
		return VM_FAULT_OOM;
	else if (ret == -EFAULT)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static void kgsl_ebimem_free(struct kgsl_memdesc *memdesc)
{
	kgsl_driver.stats.coherent -= memdesc->size;
	if (memdesc->hostptr)
		iounmap(memdesc->hostptr);

	free_contiguous_memory_by_paddr(memdesc->physaddr);
}

static void kgsl_coherent_free(struct kgsl_memdesc *memdesc)
{
	kgsl_driver.stats.coherent -= memdesc->size;
	dma_free_coherent(NULL, memdesc->size,
			memdesc->hostptr, memdesc->physaddr);
}

/* Global - also used by kgsl_drm.c */
struct kgsl_memdesc_ops kgsl_vmalloc_ops = {
	.free = kgsl_vmalloc_free,
	.vmflags = kgsl_vmalloc_vmflags,
	.vmfault = kgsl_vmalloc_vmfault,
};
EXPORT_SYMBOL(kgsl_vmalloc_ops);

static struct kgsl_memdesc_ops kgsl_ebimem_ops = {
	.free = kgsl_ebimem_free,
	.vmflags = kgsl_contiguous_vmflags,
	.vmfault = kgsl_contiguous_vmfault,
};

static struct kgsl_memdesc_ops kgsl_coherent_ops = {
	.free = kgsl_coherent_free,
};

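/*
 * Perform the requested cache maintenance on the inner caches via the
 * dmac_* helpers using the kernel mapping, then walk the scatterlist so
 * the outer cache (e.g. a PL310 L2) is covered as well when
 * CONFIG_OUTER_CACHE is enabled.
 */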
void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op)
{
	void *addr = memdesc->hostptr;
	int size = memdesc->size;

	switch (op) {
	case KGSL_CACHE_OP_FLUSH:
		dmac_flush_range(addr, addr + size);
		break;
	case KGSL_CACHE_OP_CLEAN:
		dmac_clean_range(addr, addr + size);
		break;
	case KGSL_CACHE_OP_INV:
		dmac_inv_range(addr, addr + size);
		break;
	}

	outer_cache_range_op_sg(memdesc->sg, memdesc->sglen, op);
}
EXPORT_SYMBOL(kgsl_cache_range_op);

static int
_kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			void *ptr, size_t size, unsigned int protflags)
{
	int order, ret = 0;
	int sglen = PAGE_ALIGN(size) / PAGE_SIZE;
	int i;

	memdesc->size = size;
	memdesc->pagetable = pagetable;
	memdesc->priv = KGSL_MEMFLAGS_CACHED;
	memdesc->ops = &kgsl_vmalloc_ops;
	memdesc->hostptr = (void *) ptr;

	memdesc->sg = kmalloc(sglen * sizeof(struct scatterlist), GFP_KERNEL);
	if (memdesc->sg == NULL) {
		ret = -ENOMEM;
		goto done;
	}

	memdesc->sglen = sglen;
	sg_init_table(memdesc->sg, sglen);

	for (i = 0; i < memdesc->sglen; i++, ptr += PAGE_SIZE) {
		struct page *page = vmalloc_to_page(ptr);
		if (!page) {
			ret = -EINVAL;
			goto done;
		}
		sg_set_page(&memdesc->sg[i], page, PAGE_SIZE, 0);
	}

	kgsl_cache_range_op(memdesc, KGSL_CACHE_OP_INV);

	ret = kgsl_mmu_map(pagetable, memdesc, protflags);

	if (ret)
		goto done;

	KGSL_STATS_ADD(size, kgsl_driver.stats.vmalloc,
		kgsl_driver.stats.vmalloc_max);

	order = get_order(size);

	if (order < 16)
		kgsl_driver.stats.histogram[order]++;

done:
	if (ret)
		kgsl_sharedmem_free(memdesc);

	return ret;
}

int
kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
		       struct kgsl_pagetable *pagetable, size_t size)
{
	void *ptr;

	BUG_ON(size == 0);

	size = ALIGN(size, PAGE_SIZE * 2);
	ptr = vmalloc(size);

	if (ptr == NULL) {
		KGSL_CORE_ERR("vmalloc(%d) failed\n", size);
		return -ENOMEM;
	}

	return _kgsl_sharedmem_vmalloc(memdesc, pagetable, ptr, size,
		GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
}
EXPORT_SYMBOL(kgsl_sharedmem_vmalloc);
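
/*
 * Minimal usage sketch (illustrative only; 'pagetable' is assumed to be
 * a valid struct kgsl_pagetable obtained elsewhere in the driver):
 *
 *	struct kgsl_memdesc memdesc;
 *
 *	memset(&memdesc, 0, sizeof(memdesc));
 *	if (kgsl_sharedmem_vmalloc(&memdesc, pagetable, 8192) == 0) {
 *		... use memdesc.hostptr / memdesc.gpuaddr ...
 *		kgsl_sharedmem_free(&memdesc);
 *	}
 */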

int
kgsl_sharedmem_vmalloc_user(struct kgsl_memdesc *memdesc,
			    struct kgsl_pagetable *pagetable,
			    size_t size, int flags)
{
	void *ptr;
	unsigned int protflags;

	BUG_ON(size == 0);
	ptr = vmalloc_user(size);

	if (ptr == NULL) {
		KGSL_CORE_ERR("vmalloc_user(%d) failed: allocated=%d\n",
			size, kgsl_driver.stats.vmalloc);
		return -ENOMEM;
	}

	protflags = GSL_PT_PAGE_RV;
	if (!(flags & KGSL_MEMFLAGS_GPUREADONLY))
		protflags |= GSL_PT_PAGE_WV;

	return _kgsl_sharedmem_vmalloc(memdesc, pagetable, ptr, size,
		protflags);
}
EXPORT_SYMBOL(kgsl_sharedmem_vmalloc_user);

int
kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size)
{
	int result = 0;

	size = ALIGN(size, PAGE_SIZE);

	memdesc->size = size;
	memdesc->ops = &kgsl_coherent_ops;

	memdesc->hostptr = dma_alloc_coherent(NULL, size, &memdesc->physaddr,
		GFP_KERNEL);
	if (memdesc->hostptr == NULL) {
		KGSL_CORE_ERR("dma_alloc_coherent(%d) failed\n", size);
		result = -ENOMEM;
		goto err;
	}

	result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);
	if (result)
		goto err;

	/* Record statistics */

	KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
		kgsl_driver.stats.coherent_max);

err:
	if (result)
		kgsl_sharedmem_free(memdesc);

	return result;
}
EXPORT_SYMBOL(kgsl_sharedmem_alloc_coherent);

void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
{
	if (memdesc == NULL || memdesc->size == 0)
		return;

	if (memdesc->gpuaddr)
		kgsl_mmu_unmap(memdesc->pagetable, memdesc);

	if (memdesc->ops && memdesc->ops->free)
		memdesc->ops->free(memdesc);

	kfree(memdesc->sg);

	memset(memdesc, 0, sizeof(*memdesc));
}
EXPORT_SYMBOL(kgsl_sharedmem_free);

static int
_kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
		       struct kgsl_pagetable *pagetable, size_t size)
{
	int result = 0;

	memdesc->size = size;
	memdesc->pagetable = pagetable;
	memdesc->ops = &kgsl_ebimem_ops;
	memdesc->physaddr = allocate_contiguous_ebi_nomap(size, SZ_8K);

	if (memdesc->physaddr == 0) {
		KGSL_CORE_ERR("allocate_contiguous_ebi_nomap(%d) failed\n",
			size);
		return -ENOMEM;
	}

	result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);

	if (result)
		goto err;

	result = kgsl_mmu_map(pagetable, memdesc,
		GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);

	if (result)
		goto err;

	KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
		kgsl_driver.stats.coherent_max);

err:
	if (result)
		kgsl_sharedmem_free(memdesc);

	return result;
}

int
kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc,
			   struct kgsl_pagetable *pagetable,
			   size_t size, int flags)
{
	size = ALIGN(size, PAGE_SIZE);
	return _kgsl_sharedmem_ebimem(memdesc, pagetable, size);
}
EXPORT_SYMBOL(kgsl_sharedmem_ebimem_user);

int
kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
		      struct kgsl_pagetable *pagetable, size_t size)
{
	int result;
	size = ALIGN(size, 8192);
	result = _kgsl_sharedmem_ebimem(memdesc, pagetable, size);

	if (result)
		return result;

	memdesc->hostptr = ioremap(memdesc->physaddr, size);

	if (memdesc->hostptr == NULL) {
		KGSL_CORE_ERR("ioremap failed\n");
		kgsl_sharedmem_free(memdesc);
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_ebimem);

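/*
 * Small accessors for CPU-visible shared memory.  They operate through
 * memdesc->hostptr with relaxed (unordered) 32-bit accessors, bounds-check
 * against memdesc->size, and mirror writes into the CFF dump stream.
 */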
int
kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
			uint32_t *dst,
			unsigned int offsetbytes)
{
	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL || dst == NULL);
	WARN_ON(offsetbytes + sizeof(unsigned int) > memdesc->size);

	if (offsetbytes + sizeof(unsigned int) > memdesc->size)
		return -ERANGE;

	*dst = readl_relaxed(memdesc->hostptr + offsetbytes);
	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_readl);

int
kgsl_sharedmem_writel(const struct kgsl_memdesc *memdesc,
			unsigned int offsetbytes,
			uint32_t src)
{
	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
	BUG_ON(offsetbytes + sizeof(unsigned int) > memdesc->size);

	kgsl_cffdump_setmem(memdesc->physaddr + offsetbytes,
		src, sizeof(uint));
	writel_relaxed(src, memdesc->hostptr + offsetbytes);
	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_writel);

int
kgsl_sharedmem_set(const struct kgsl_memdesc *memdesc, unsigned int offsetbytes,
			unsigned int value, unsigned int sizebytes)
{
	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
	BUG_ON(offsetbytes + sizebytes > memdesc->size);

	kgsl_cffdump_setmem(memdesc->physaddr + offsetbytes, value,
		sizebytes);
	memset(memdesc->hostptr + offsetbytes, value, sizebytes);
	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_set);