/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/memory_alloc.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>

#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_cffdump.h"
#include "kgsl_device.h"

struct ion_client *kgsl_client;

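/*
 * Per-process memory statistics are exported through sysfs.  Each
 * kgsl_mem_entry_attribute binds one memory type to a show() callback
 * that formats the current value into the sysfs buffer.
 */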
struct kgsl_mem_entry_attribute {
	struct attribute attr;
	int memtype;
	ssize_t (*show)(struct kgsl_process_private *priv,
			int type, char *buf);
};

#define to_mem_entry_attr(a) \
	container_of(a, struct kgsl_mem_entry_attribute, attr)

#define __MEM_ENTRY_ATTR(_type, _name, _show) \
{ \
	.attr = { .name = __stringify(_name), .mode = 0444 }, \
	.memtype = _type, \
	.show = _show, \
}

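/*
 * gpubusy_show() reports GPU utilization for one process: a total/busy
 * pair for the process overall, followed by a total/busy pair for every
 * power level, in the driver's internal time units.
 */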
#ifdef CONFIG_MSM_KGSL_GPU_USAGE
static ssize_t
gpubusy_show(struct kgsl_process_private *priv, int type, char *buf)
{
	ssize_t len;
	int i;

	len = snprintf(buf, PAGE_SIZE, "%lld %lld",
			priv->gputime.total, priv->gputime.busy);
	for (i = 0; i < KGSL_MAX_PWRLEVELS; i++)
		len += snprintf(buf + len, PAGE_SIZE - len, " %lld %lld",
				priv->gputime_in_state[i].total,
				priv->gputime_in_state[i].busy);
	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	return len;
}

static struct kgsl_mem_entry_attribute gpubusy =
	__MEM_ENTRY_ATTR(0, gpubusy, gpubusy_show);
#endif

struct mem_entry_stats {
	int memtype;
	struct kgsl_mem_entry_attribute attr;
	struct kgsl_mem_entry_attribute max_attr;
};

#define MEM_ENTRY_STAT(_type, _name) \
{ \
	.memtype = _type, \
	.attr = __MEM_ENTRY_ATTR(_type, _name, mem_entry_show), \
	.max_attr = __MEM_ENTRY_ATTR(_type, _name##_max, \
		mem_entry_max_show), \
}

static struct page *kgsl_guard_page;

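/*
 * Map a per-process sysfs kobject back to its kgsl_process_private by
 * parsing the pid out of the kobject name.  Callers take
 * kgsl_driver.process_mutex around this lookup so the list walk is safe.
 */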
static struct kgsl_process_private *
_get_priv_from_kobj(struct kobject *kobj)
{
	struct kgsl_process_private *private;
	unsigned long name;

	if (!kobj)
		return NULL;

	if (sscanf(kobj->name, "%lu", &name) != 1)
		return NULL;

	list_for_each_entry(private, &kgsl_driver.process_list, list) {
		if (private->pid == name)
			return private;
	}

	return NULL;
}

static ssize_t
mem_entry_show(struct kgsl_process_private *priv, int type, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", priv->stats[type].cur);
}

static ssize_t
mem_entry_max_show(struct kgsl_process_private *priv, int type, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", priv->stats[type].max);
}

static void mem_entry_sysfs_release(struct kobject *kobj)
{
}

static ssize_t mem_entry_sysfs_show(struct kobject *kobj,
	struct attribute *attr, char *buf)
{
	struct kgsl_mem_entry_attribute *pattr = to_mem_entry_attr(attr);
	struct kgsl_process_private *priv;
	ssize_t ret;

	mutex_lock(&kgsl_driver.process_mutex);
	priv = _get_priv_from_kobj(kobj);

	if (priv && pattr->show)
		ret = pattr->show(priv, pattr->memtype, buf);
	else
		ret = -EIO;

	mutex_unlock(&kgsl_driver.process_mutex);
	return ret;
}

static const struct sysfs_ops mem_entry_sysfs_ops = {
	.show = mem_entry_sysfs_show,
};

static struct kobj_type ktype_mem_entry = {
	.sysfs_ops = &mem_entry_sysfs_ops,
	.default_attrs = NULL,
	.release = mem_entry_sysfs_release
};

static struct mem_entry_stats mem_stats[] = {
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_KERNEL, kernel),
#ifdef CONFIG_ANDROID_PMEM
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_PMEM, pmem),
#endif
#ifdef CONFIG_ASHMEM
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_ASHMEM, ashmem),
#endif
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_USER, user),
#ifdef CONFIG_ION
	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_ION, ion),
#endif
};

void
kgsl_process_uninit_sysfs(struct kgsl_process_private *private)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mem_stats); i++) {
		sysfs_remove_file(&private->kobj, &mem_stats[i].attr.attr);
		sysfs_remove_file(&private->kobj,
			&mem_stats[i].max_attr.attr);
	}

#ifdef CONFIG_MSM_KGSL_GPU_USAGE
	sysfs_remove_file(&private->kobj, &gpubusy.attr);
#endif
	kobject_put(&private->kobj);
}

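/*
 * Create the per-process kobject (named after the pid) under
 * kgsl_driver.prockobj and populate it with the current/max attribute
 * pair for every tracked memory type.
 */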
void
kgsl_process_init_sysfs(struct kgsl_process_private *private)
{
	unsigned char name[16];
	int i, ret;

	snprintf(name, sizeof(name), "%d", private->pid);

	if (kobject_init_and_add(&private->kobj, &ktype_mem_entry,
		kgsl_driver.prockobj, name))
		return;

	for (i = 0; i < ARRAY_SIZE(mem_stats); i++) {
		ret = sysfs_create_file(&private->kobj,
			&mem_stats[i].attr.attr);
		ret = sysfs_create_file(&private->kobj,
			&mem_stats[i].max_attr.attr);
	}
#ifdef CONFIG_MSM_KGSL_GPU_USAGE
	ret = sysfs_create_file(&private->kobj, &gpubusy.attr);
#endif
}

static ssize_t kgsl_drv_memstat_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	unsigned int val = 0;

	if (!strcmp(attr->attr.name, "vmalloc"))
		val = kgsl_driver.stats.vmalloc;
	else if (!strcmp(attr->attr.name, "vmalloc_max"))
		val = kgsl_driver.stats.vmalloc_max;
	else if (!strcmp(attr->attr.name, "page_alloc"))
		val = kgsl_driver.stats.page_alloc;
	else if (!strcmp(attr->attr.name, "page_alloc_max"))
		val = kgsl_driver.stats.page_alloc_max;
	else if (!strcmp(attr->attr.name, "coherent"))
		val = kgsl_driver.stats.coherent;
	else if (!strcmp(attr->attr.name, "coherent_max"))
		val = kgsl_driver.stats.coherent_max;
	else if (!strcmp(attr->attr.name, "mapped"))
		val = kgsl_driver.stats.mapped;
	else if (!strcmp(attr->attr.name, "mapped_max"))
		val = kgsl_driver.stats.mapped_max;

	return snprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t kgsl_drv_histogram_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	int len = 0;
	int i;

	for (i = 0; i < 16; i++)
		len += snprintf(buf + len, PAGE_SIZE - len, "%d ",
			kgsl_driver.stats.histogram[i]);

	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	return len;
}

DEVICE_ATTR(vmalloc, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(vmalloc_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(page_alloc, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(page_alloc_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(coherent, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(coherent_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(mapped, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(mapped_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(histogram, 0444, kgsl_drv_histogram_show, NULL);

static const struct device_attribute *drv_attr_list[] = {
	&dev_attr_vmalloc,
	&dev_attr_vmalloc_max,
	&dev_attr_page_alloc,
	&dev_attr_page_alloc_max,
	&dev_attr_coherent,
	&dev_attr_coherent_max,
	&dev_attr_mapped,
	&dev_attr_mapped_max,
	&dev_attr_histogram,
	NULL
};

void
kgsl_sharedmem_uninit_sysfs(void)
{
	kgsl_remove_device_sysfs_files(&kgsl_driver.virtdev, drv_attr_list);
}

int
kgsl_sharedmem_init_sysfs(void)
{
	return kgsl_create_device_sysfs_files(&kgsl_driver.virtdev,
		drv_attr_list);
}

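/*
 * The inner (L1) cache is maintained with the dmac_*_range() helpers on
 * kernel virtual addresses; the outer (L2) cache, when present, needs
 * physical addresses, so it is walked one scatterlist segment at a time.
 */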
#ifdef CONFIG_OUTER_CACHE
static void _outer_cache_range_op(int op, unsigned long addr, size_t size)
{
	switch (op) {
	case KGSL_CACHE_OP_FLUSH:
		outer_flush_range(addr, addr + size);
		break;
	case KGSL_CACHE_OP_CLEAN:
		outer_clean_range(addr, addr + size);
		break;
	case KGSL_CACHE_OP_INV:
		outer_inv_range(addr, addr + size);
		break;
	}
}

static void outer_cache_range_op_sg(struct scatterlist *sg, int sglen, int op)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, sglen, i) {
		unsigned int paddr = kgsl_get_sg_pa(s);
		_outer_cache_range_op(op, paddr, s->length);
	}
}
#else
static void outer_cache_range_op_sg(struct scatterlist *sg, int sglen, int op)
{
}
#endif

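/*
 * Fault handler for ION-backed (physically contiguous) buffers: the
 * faulting offset is turned into a pfn relative to the base physical
 * address and inserted directly, PFNMAP-style.
 */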
static int kgsl_ion_alloc_vmfault(struct kgsl_memdesc *memdesc,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	unsigned long offset, pfn;
	int ret;

	offset = ((unsigned long) vmf->virtual_address - vma->vm_start)
		>> PAGE_SHIFT;

	pfn = (memdesc->sg[0].dma_address >> PAGE_SHIFT) + offset;
	ret = vm_insert_pfn(vma, (unsigned long) vmf->virtual_address, pfn);

	if (ret == -ENOMEM || ret == -EAGAIN)
		return VM_FAULT_OOM;
	else if (ret == -EFAULT)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static int kgsl_ion_alloc_vmflags(struct kgsl_memdesc *memdesc)
{
	return VM_RESERVED | VM_DONTEXPAND;
}

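/*
 * Tear down an ION-backed allocation: release the ION handle, drop any
 * kernel mapping, and back out the statistics that were charged when
 * the buffer was created.
 */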
static void kgsl_ion_alloc_free(struct kgsl_memdesc *memdesc)
{
	kgsl_driver.stats.pre_alloc -= memdesc->size;
	if (memdesc->handle)
		ion_free(kgsl_client, memdesc->handle);

	if (memdesc->hostptr) {
		iounmap(memdesc->hostptr);
		kgsl_driver.stats.vmalloc -= memdesc->size;
	}

	if (memdesc->private)
		kgsl_process_sub_stats(memdesc->private,
			KGSL_MEM_ENTRY_PRE_ALLOC, memdesc->size);
	else
		kgsl_driver.stats.pre_alloc_kernel -= memdesc->size;
}

static int kgsl_ion_alloc_map_kernel(struct kgsl_memdesc *memdesc)
{
	if (!memdesc->hostptr) {
		memdesc->hostptr = ioremap(memdesc->sg[0].dma_address,
				memdesc->sg[0].length);
		if (IS_ERR_OR_NULL(memdesc->hostptr)) {
			KGSL_CORE_ERR("kgsl: ion ioremap failed\n");
			return -ENOMEM;
		}
		KGSL_STATS_ADD(memdesc->size, kgsl_driver.stats.vmalloc,
			kgsl_driver.stats.vmalloc_max);
	}

	return 0;
}

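/*
 * Fault handler for page_alloc buffers: these are built from individual
 * pages tracked in the scatterlist, so the faulting offset indexes the
 * sg table and the page is handed back with an elevated refcount.
 */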
static int kgsl_page_alloc_vmfault(struct kgsl_memdesc *memdesc,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	unsigned long offset;
	struct page *page;
	int i;

	offset = (unsigned long) vmf->virtual_address - vma->vm_start;

	i = offset >> PAGE_SHIFT;
	page = sg_page(&memdesc->sg[i]);
	if (page == NULL)
		return VM_FAULT_SIGBUS;

	get_page(page);

	vmf->page = page;
	return 0;
}

static int kgsl_page_alloc_vmflags(struct kgsl_memdesc *memdesc)
{
	return VM_RESERVED | VM_DONTEXPAND;
}

static void kgsl_page_alloc_free(struct kgsl_memdesc *memdesc)
{
	int i = 0;
	struct scatterlist *sg;
	int sglen = memdesc->sglen;

	/* The guard page is shared and not ours to free */
	if (memdesc->flags & KGSL_MEMDESC_GUARD_PAGE)
		sglen--;

	kgsl_driver.stats.page_alloc -= memdesc->size;

	if (memdesc->hostptr) {
		vunmap(memdesc->hostptr);
		kgsl_driver.stats.vmalloc -= memdesc->size;
	}
	if (memdesc->sg)
		for_each_sg(memdesc->sg, sg, sglen, i)
			__free_page(sg_page(sg));

	if (memdesc->private)
		kgsl_process_sub_stats(memdesc->private,
			KGSL_MEM_ENTRY_PAGE_ALLOC, memdesc->size);
	else
		kgsl_driver.stats.page_alloc_kernel -= memdesc->size;
}

static int kgsl_contiguous_vmflags(struct kgsl_memdesc *memdesc)
{
	return VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
}

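/*
 * Build a kernel mapping for a page_alloc buffer on demand: collect the
 * backing pages out of the scatterlist and vmap() them write-combined.
 */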
static int kgsl_page_alloc_map_kernel(struct kgsl_memdesc *memdesc)
{
	if (!memdesc->hostptr) {
		pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);
		struct page **pages = NULL;
		struct scatterlist *sg;
		int sglen = memdesc->sglen;
		int i;

		/* Don't map the guard page into the kernel */
		if (memdesc->flags & KGSL_MEMDESC_GUARD_PAGE)
			sglen--;

		pages = vmalloc(sglen * sizeof(struct page *));
		if (!pages) {
			KGSL_CORE_ERR("vmalloc(%d) failed\n",
				sglen * sizeof(struct page *));
			return -ENOMEM;
		}
		for_each_sg(memdesc->sg, sg, sglen, i)
			pages[i] = sg_page(sg);
		memdesc->hostptr = vmap(pages, sglen,
			VM_IOREMAP, page_prot);
		/* Only count the mapping if vmap() actually succeeded */
		if (memdesc->hostptr)
			KGSL_STATS_ADD(memdesc->size,
				kgsl_driver.stats.vmalloc,
				kgsl_driver.stats.vmalloc_max);
		vfree(pages);
	}
	if (!memdesc->hostptr)
		return -ENOMEM;

	return 0;
}

static int kgsl_contiguous_vmfault(struct kgsl_memdesc *memdesc,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	unsigned long offset, pfn;
	int ret;

	offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	pfn = (memdesc->physaddr >> PAGE_SHIFT) + offset;
	ret = vm_insert_pfn(vma, (unsigned long) vmf->virtual_address, pfn);

	if (ret == -ENOMEM || ret == -EAGAIN)
		return VM_FAULT_OOM;
	else if (ret == -EFAULT)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static void kgsl_ebimem_free(struct kgsl_memdesc *memdesc)
{
	kgsl_driver.stats.coherent -= memdesc->size;
	if (memdesc->hostptr)
		iounmap(memdesc->hostptr);

	free_contiguous_memory_by_paddr(memdesc->physaddr);
}

static void kgsl_coherent_free(struct kgsl_memdesc *memdesc)
{
	kgsl_driver.stats.coherent -= memdesc->size;
	dma_free_coherent(NULL, memdesc->size,
			memdesc->hostptr, memdesc->physaddr);
}

struct kgsl_memdesc_ops kgsl_page_alloc_ops = {
	.free = kgsl_page_alloc_free,
	.vmflags = kgsl_page_alloc_vmflags,
	.vmfault = kgsl_page_alloc_vmfault,
	.map_kernel_mem = kgsl_page_alloc_map_kernel,
};
EXPORT_SYMBOL(kgsl_page_alloc_ops);

struct kgsl_memdesc_ops kgsl_ion_alloc_ops = {
	.free = kgsl_ion_alloc_free,
	.vmflags = kgsl_ion_alloc_vmflags,
	.vmfault = kgsl_ion_alloc_vmfault,
	.map_kernel_mem = kgsl_ion_alloc_map_kernel,
};
EXPORT_SYMBOL(kgsl_ion_alloc_ops);

static struct kgsl_memdesc_ops kgsl_ebimem_ops = {
	.free = kgsl_ebimem_free,
	.vmflags = kgsl_contiguous_vmflags,
	.vmfault = kgsl_contiguous_vmfault,
};

static struct kgsl_memdesc_ops kgsl_coherent_ops = {
	.free = kgsl_coherent_free,
};

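/*
 * Perform the requested cache maintenance (flush, clean, or invalidate)
 * on a buffer's kernel mapping, then repeat it on the outer cache using
 * the scatterlist's physical segments.
 */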
void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op)
{
	void *addr = memdesc->hostptr;
	int size = memdesc->size;

	switch (op) {
	case KGSL_CACHE_OP_FLUSH:
		dmac_flush_range(addr, addr + size);
		break;
	case KGSL_CACHE_OP_CLEAN:
		dmac_clean_range(addr, addr + size);
		break;
	case KGSL_CACHE_OP_INV:
		dmac_inv_range(addr, addr + size);
		break;
	}

	outer_cache_range_op_sg(memdesc->sg, memdesc->sglen, op);
}
EXPORT_SYMBOL(kgsl_cache_range_op);

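/*
 * Core page_alloc path: allocate the buffer one highmem page at a time,
 * build a scatterlist over the pages, zero and flush them, and map the
 * result into the GPU pagetable.  On an IOMMU an extra shared guard page
 * is appended to the scatterlist to catch GPU overruns past the end of
 * the buffer.
 */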
static int
_kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			size_t size, unsigned int protflags)
{
	int i, order, ret = 0;
	int sglen = PAGE_ALIGN(size) / PAGE_SIZE;
	struct page **pages = NULL;
	pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);
	void *ptr;

	/* Reserve a scatterlist slot for the IOMMU guard page */
	if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU)
		sglen++;

	memdesc->size = size;
	memdesc->pagetable = pagetable;
	memdesc->priv = KGSL_MEMFLAGS_CACHED;
	memdesc->ops = &kgsl_page_alloc_ops;

	memdesc->sg = kgsl_sg_alloc(sglen);

	if (memdesc->sg == NULL) {
		KGSL_CORE_ERR("vmalloc(%d) failed\n",
			sglen * sizeof(struct scatterlist));
		ret = -ENOMEM;
		goto done;
	}

	pages = kmalloc(sglen * sizeof(struct page *), GFP_KERNEL);

	if (pages == NULL) {
		KGSL_CORE_ERR("kmalloc (%d) failed\n",
			sglen * sizeof(struct page *));
		ret = -ENOMEM;
		goto done;
	}

	kmemleak_not_leak(memdesc->sg);

	memdesc->sglen = sglen;
	sg_init_table(memdesc->sg, sglen);

	for (i = 0; i < PAGE_ALIGN(size) / PAGE_SIZE; i++) {
		pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
		if (pages[i] == NULL) {
			ret = -ENOMEM;
			/* Only the pages allocated so far may be freed */
			memdesc->sglen = i;
			goto done;
		}

		sg_set_page(&memdesc->sg[i], pages[i], PAGE_SIZE, 0);
	}

	if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU) {
		/* The guard page is allocated once and shared */
		if (kgsl_guard_page == NULL)
			kgsl_guard_page = alloc_page(GFP_KERNEL | __GFP_ZERO |
				__GFP_HIGHMEM);

		if (kgsl_guard_page != NULL) {
			sg_set_page(&memdesc->sg[sglen - 1], kgsl_guard_page,
				PAGE_SIZE, 0);
			memdesc->flags |= KGSL_MEMDESC_GUARD_PAGE;
		} else
			memdesc->sglen--;
	}

	/*
	 * Zero and flush the whole buffer through one vmap() if possible;
	 * fall back to a page at a time through kmap_atomic() when vmap
	 * address space is not available.
	 */
	ptr = vmap(pages, i, VM_IOREMAP, page_prot);

	if (ptr != NULL) {
		memset(ptr, 0, memdesc->size);
		dmac_flush_range(ptr, ptr + memdesc->size);
		vunmap(ptr);
	} else {
		int j;

		for (j = 0; j < i; j++) {
			ptr = kmap_atomic(pages[j]);
			memset(ptr, 0, PAGE_SIZE);
			dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
		}
	}

	outer_cache_range_op_sg(memdesc->sg, memdesc->sglen,
		KGSL_CACHE_OP_FLUSH);

	ret = kgsl_mmu_map(pagetable, memdesc, protflags);

	if (ret)
		goto done;

	order = get_order(size);

	if (order < 16)
		kgsl_driver.stats.histogram[order]++;

done:
	kfree(pages);

	KGSL_STATS_ADD(size, kgsl_driver.stats.page_alloc,
		kgsl_driver.stats.page_alloc_max);

	if (ret)
		kgsl_sharedmem_free(memdesc);

	return ret;
}

int
kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
		struct kgsl_pagetable *pagetable, size_t size)
{
	int ret = 0;
	BUG_ON(size == 0);

	size = ALIGN(size, PAGE_SIZE * 2);

	kgsl_driver.stats.page_alloc_kernel += size;
	ret = _kgsl_sharedmem_page_alloc(memdesc, pagetable, size,
		GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
	if (!ret)
		ret = kgsl_page_alloc_map_kernel(memdesc);
	if (ret) {
		/* kgsl_page_alloc_free() backs out the accounting above */
		kgsl_sharedmem_free(memdesc);
	}
	return ret;
}
EXPORT_SYMBOL(kgsl_sharedmem_page_alloc);

int
kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
			struct kgsl_process_private *private,
			struct kgsl_pagetable *pagetable,
			size_t size, int flags)
{
	unsigned int protflags;
	int ret = 0;

	if (size == 0)
		return -EINVAL;

	protflags = GSL_PT_PAGE_RV;
	if (!(flags & KGSL_MEMFLAGS_GPUREADONLY))
		protflags |= GSL_PT_PAGE_WV;

	ret = _kgsl_sharedmem_page_alloc(memdesc, pagetable, size,
		protflags);

	if (ret == 0 && private)
		kgsl_process_add_stats(private, KGSL_MEM_ENTRY_PAGE_ALLOC,
			size);

	return ret;
}
EXPORT_SYMBOL(kgsl_sharedmem_page_alloc_user);

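/*
 * Core ION path: carve one physically contiguous buffer out of the ION
 * SF heap, describe it with a single-entry scatterlist, zero and flush
 * it through a temporary ioremap(), and map it for the GPU.
 */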
static int
_kgsl_sharedmem_ion_alloc(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			size_t size, unsigned int protflags)
{
	int order, ret = 0;
	int sglen = 1;
	void *ptr;
	struct ion_handle *handle = NULL;
	ion_phys_addr_t pa = 0;
	size_t len = 0;

	memdesc->size = size;
	memdesc->pagetable = pagetable;
	memdesc->priv = KGSL_MEMFLAGS_CACHED;
	memdesc->ops = &kgsl_ion_alloc_ops;

	memdesc->sg = kgsl_sg_alloc(sglen);

	if (memdesc->sg == NULL) {
		KGSL_CORE_ERR("kgsl_sg_alloc vmalloc(%d) failed\n",
			sglen * sizeof(struct scatterlist));
		ret = -ENOMEM;
		goto done;
	}

	kmemleak_not_leak(memdesc->sg);

	memdesc->sglen = sglen;
	sg_init_table(memdesc->sg, sglen);

	/* The ION client is created lazily on first use */
	if (kgsl_client == NULL)
		kgsl_client = msm_ion_client_create(-1, "KGSL");

	handle = ion_alloc(kgsl_client, size, SZ_4K, 0x1 << ION_SF_HEAP_ID);
	if (IS_ERR_OR_NULL(handle)) {
		ret = -ENOMEM;
		goto done;
	}

	if (ion_phys(kgsl_client, handle, &pa, &len)) {
		KGSL_CORE_ERR("kgsl: ion_phys() failed\n");
		ret = -ENOMEM;
		goto done;
	}

	memdesc->handle = handle;

	/* The buffer is contiguous, so one sg entry covers all of it */
	memdesc->sg[0].length = memdesc->size;
	memdesc->sg[0].offset = 0;
	memdesc->sg[0].dma_address = pa;

	/* Zero the buffer through a temporary kernel mapping */
	ptr = ioremap(pa, memdesc->size);

	if (ptr != NULL) {
		memset(ptr, 0, memdesc->size);
		dmac_flush_range(ptr, ptr + memdesc->size);
		iounmap(ptr);
	}

	outer_cache_range_op_sg(memdesc->sg, memdesc->sglen,
		KGSL_CACHE_OP_FLUSH);

	ret = kgsl_mmu_map(pagetable, memdesc, protflags);

	if (ret) {
		KGSL_CORE_ERR("kgsl: kgsl_mmu_map failed\n");
		ret = -ENOMEM;
		goto done;
	}

	order = get_order(size);

	if (order < 16)
		kgsl_driver.stats.histogram[order]++;

done:
	KGSL_STATS_ADD(size, kgsl_driver.stats.pre_alloc,
		kgsl_driver.stats.pre_alloc_max);

	if (ret)
		kgsl_sharedmem_free(memdesc);

	return ret;
}

int
kgsl_sharedmem_ion_alloc(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			size_t size)
{
	int ret;

	BUG_ON(size == 0);
	size = PAGE_ALIGN(size);

	kgsl_driver.stats.pre_alloc_kernel += size;
	ret = _kgsl_sharedmem_ion_alloc(memdesc, pagetable, size,
		GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);

	if (!ret)
		ret = kgsl_ion_alloc_map_kernel(memdesc);

	if (ret) {
		/* kgsl_ion_alloc_free() backs out the accounting above */
		kgsl_sharedmem_free(memdesc);
	}
	return ret;
}
EXPORT_SYMBOL(kgsl_sharedmem_ion_alloc);

int
kgsl_sharedmem_ion_alloc_user(struct kgsl_memdesc *memdesc,
			struct kgsl_process_private *private,
			struct kgsl_pagetable *pagetable,
			size_t size, int flags)
{
	unsigned int protflags;
	int ret = 0;

	BUG_ON(size == 0);

	size = PAGE_ALIGN(size);

	protflags = GSL_PT_PAGE_RV;
	if (!(flags & KGSL_MEMFLAGS_GPUREADONLY))
		protflags |= GSL_PT_PAGE_WV;

	ret = _kgsl_sharedmem_ion_alloc(memdesc, pagetable, size,
		protflags);

	if (ret == 0 && private)
		kgsl_process_add_stats(private, KGSL_MEM_ENTRY_PRE_ALLOC,
			size);

	return ret;
}
EXPORT_SYMBOL(kgsl_sharedmem_ion_alloc_user);

int
kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size)
{
	int result = 0;

	size = ALIGN(size, PAGE_SIZE);

	memdesc->size = size;
	memdesc->ops = &kgsl_coherent_ops;

	memdesc->hostptr = dma_alloc_coherent(NULL, size, &memdesc->physaddr,
		GFP_KERNEL);
	if (memdesc->hostptr == NULL) {
		KGSL_CORE_ERR("dma_alloc_coherent(%d) failed\n", size);
		result = -ENOMEM;
		goto err;
	}

	result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);
	if (result)
		goto err;

	KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
		kgsl_driver.stats.coherent_max);

err:
	if (result)
		kgsl_sharedmem_free(memdesc);

	return result;
}
EXPORT_SYMBOL(kgsl_sharedmem_alloc_coherent);

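/*
 * Common teardown for every allocation type: unmap from the GPU
 * pagetable, invoke the type-specific free callback, release the
 * scatterlist, and clear the descriptor against reuse.
 */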
void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
{
	if (memdesc == NULL || memdesc->size == 0)
		return;

	if (memdesc->gpuaddr)
		kgsl_mmu_unmap(memdesc->pagetable, memdesc);

	if (memdesc->ops && memdesc->ops->free)
		memdesc->ops->free(memdesc);

	kgsl_sg_free(memdesc->sg, memdesc->sglen);

	memset(memdesc, 0, sizeof(*memdesc));
}
EXPORT_SYMBOL(kgsl_sharedmem_free);

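/*
 * EBI memory comes from the contiguous-memory allocator: grab a
 * physically contiguous region, wrap it in a single-entry scatterlist,
 * and map it into the GPU pagetable.
 */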
static int
_kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable, size_t size)
{
	int result = 0;

	memdesc->size = size;
	memdesc->pagetable = pagetable;
	memdesc->ops = &kgsl_ebimem_ops;
	memdesc->physaddr = allocate_contiguous_ebi_nomap(size, SZ_8K);

	if (memdesc->physaddr == 0) {
		KGSL_CORE_ERR("allocate_contiguous_ebi_nomap(%d) failed\n",
			size);
		return -ENOMEM;
	}

	result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);

	if (result)
		goto err;

	result = kgsl_mmu_map(pagetable, memdesc,
		GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);

	if (result)
		goto err;

	KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
		kgsl_driver.stats.coherent_max);

err:
	if (result)
		kgsl_sharedmem_free(memdesc);

	return result;
}

int
kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			size_t size, int flags)
{
	size = ALIGN(size, PAGE_SIZE);
	return _kgsl_sharedmem_ebimem(memdesc, pagetable, size);
}
EXPORT_SYMBOL(kgsl_sharedmem_ebimem_user);

int
kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable, size_t size)
{
	int result;

	size = ALIGN(size, 8192);
	result = _kgsl_sharedmem_ebimem(memdesc, pagetable, size);

	if (result)
		return result;

	memdesc->hostptr = ioremap(memdesc->physaddr, size);

	if (memdesc->hostptr == NULL) {
		KGSL_CORE_ERR("ioremap failed\n");
		kgsl_sharedmem_free(memdesc);
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_ebimem);

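/*
 * Word-sized accessors for shared buffers.  Both check alignment and
 * bounds before touching the mapping; writes are also recorded via
 * kgsl_cffdump_setmem() for the CFF dump stream.
 */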
int
kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
			uint32_t *dst,
			unsigned int offsetbytes)
{
	uint32_t *src;

	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL || dst == NULL);
	WARN_ON(offsetbytes % sizeof(uint32_t) != 0);
	if (offsetbytes % sizeof(uint32_t) != 0)
		return -EINVAL;

	WARN_ON(offsetbytes + sizeof(uint32_t) > memdesc->size);
	if (offsetbytes + sizeof(uint32_t) > memdesc->size)
		return -ERANGE;

	src = (uint32_t *)(memdesc->hostptr + offsetbytes);
	*dst = *src;
	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_readl);

int
kgsl_sharedmem_writel(const struct kgsl_memdesc *memdesc,
			unsigned int offsetbytes,
			uint32_t src)
{
	uint32_t *dst;

	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
	WARN_ON(offsetbytes % sizeof(uint32_t) != 0);
	if (offsetbytes % sizeof(uint32_t) != 0)
		return -EINVAL;

	WARN_ON(offsetbytes + sizeof(uint32_t) > memdesc->size);
	if (offsetbytes + sizeof(uint32_t) > memdesc->size)
		return -ERANGE;

	kgsl_cffdump_setmem(memdesc->gpuaddr + offsetbytes,
		src, sizeof(uint32_t));
	dst = (uint32_t *)(memdesc->hostptr + offsetbytes);
	*dst = src;
	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_writel);

int
kgsl_sharedmem_set(const struct kgsl_memdesc *memdesc, unsigned int offsetbytes,
			unsigned int value, unsigned int sizebytes)
{
	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
	BUG_ON(offsetbytes + sizebytes > memdesc->size);

	kgsl_cffdump_setmem(memdesc->gpuaddr + offsetbytes, value,
		sizebytes);
	memset(memdesc->hostptr + offsetbytes, value, sizebytes);
	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_set);

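/*
 * Map an entire page_alloc buffer into a userspace VMA with
 * vm_insert_page().  The VMA must match the buffer exactly: same size,
 * and one scatterlist entry per page.
 */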
int
kgsl_sharedmem_map_vma(struct vm_area_struct *vma,
			const struct kgsl_memdesc *memdesc)
{
	unsigned long addr = vma->vm_start;
	unsigned long size = vma->vm_end - vma->vm_start;
	int ret, i = 0;

	if (!memdesc->sg || (size != memdesc->size) ||
		(memdesc->sglen != (size / PAGE_SIZE)))
		return -EINVAL;

	for (; addr < vma->vm_end; addr += PAGE_SIZE, i++) {
		ret = vm_insert_page(vma, addr, sg_page(&memdesc->sg[i]));
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_map_vma);