blob: a1e4c91f067f75afbc3d3769c4e0944c4cbf7891 [file] [log] [blame]
Jeff Boody28afec42012-01-18 15:47:46 -07001/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#ifndef __KGSL_SHAREDMEM_H
14#define __KGSL_SHAREDMEM_H
15
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>

#include "kgsl_mmu.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070022
23struct kgsl_device;
24struct kgsl_process_private;
25
26#define KGSL_CACHE_OP_INV 0x01
27#define KGSL_CACHE_OP_FLUSH 0x02
28#define KGSL_CACHE_OP_CLEAN 0x03
29
30/** Set if the memdesc describes cached memory */
31#define KGSL_MEMFLAGS_CACHED 0x00000001
Jeremy Gebben7faf9ec2012-03-21 14:09:55 -060032/** Set if the memdesc is mapped into all pagetables */
33#define KGSL_MEMFLAGS_GLOBAL 0x00000002
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070034
/*
 * Per-allocator callbacks attached to a kgsl_memdesc.  Each backing
 * store (vmalloc, coherent, ebimem) provides its own implementation.
 */
struct kgsl_memdesc_ops {
	/* Extra vm_flags to apply when mmap'ing this memory to userspace */
	int (*vmflags)(struct kgsl_memdesc *);
	/* Fault handler for userspace mappings of this memory */
	int (*vmfault)(struct kgsl_memdesc *, struct vm_area_struct *,
		       struct vm_fault *);
	/* Release the backing memory for this descriptor */
	void (*free)(struct kgsl_memdesc *memdesc);
};
41
42extern struct kgsl_memdesc_ops kgsl_vmalloc_ops;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070043
44int kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
45 struct kgsl_pagetable *pagetable, size_t size);
46
47int kgsl_sharedmem_vmalloc_user(struct kgsl_memdesc *memdesc,
48 struct kgsl_pagetable *pagetable,
49 size_t size, int flags);
50
51int kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size);
52
53int kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc,
54 struct kgsl_pagetable *pagetable,
55 size_t size, int flags);
56
57int kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
58 struct kgsl_pagetable *pagetable,
59 size_t size);
60
61void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc);
62
63int kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
64 uint32_t *dst,
65 unsigned int offsetbytes);
66
67int kgsl_sharedmem_writel(const struct kgsl_memdesc *memdesc,
68 unsigned int offsetbytes,
69 uint32_t src);
70
71int kgsl_sharedmem_set(const struct kgsl_memdesc *memdesc,
72 unsigned int offsetbytes, unsigned int value,
73 unsigned int sizebytes);
74
75void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op);
76
77void kgsl_process_init_sysfs(struct kgsl_process_private *private);
78void kgsl_process_uninit_sysfs(struct kgsl_process_private *private);
79
80int kgsl_sharedmem_init_sysfs(void);
81void kgsl_sharedmem_uninit_sysfs(void);
82
/*
 * Return the physical address of a scatterlist entry.  sg_dma_address()
 * is consulted first because ion carveout regions do not work with
 * sg_phys(); sg_phys() is only used when no dma address has been set.
 */
static inline unsigned int kgsl_get_sg_pa(struct scatterlist *sg)
{
	unsigned int addr = sg_dma_address(sg);

	return addr ? addr : sg_phys(sg);
}
94
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070095static inline int
Jordan Croused17e9aa2011-10-12 16:57:48 -060096memdesc_sg_phys(struct kgsl_memdesc *memdesc,
97 unsigned int physaddr, unsigned int size)
98{
Jeff Boody28afec42012-01-18 15:47:46 -070099 memdesc->sg = vmalloc(sizeof(struct scatterlist) * 1);
Jordan Croused17e9aa2011-10-12 16:57:48 -0600100 if (memdesc->sg == NULL)
101 return -ENOMEM;
102
Anshuman Danieecd5202012-02-17 19:52:49 +0530103 kmemleak_not_leak(memdesc->sg);
104
Jordan Croused17e9aa2011-10-12 16:57:48 -0600105 memdesc->sglen = 1;
106 sg_init_table(memdesc->sg, 1);
Jeremy Gebben582fe312012-03-23 10:19:44 -0600107 memdesc->sg[0].length = size;
108 memdesc->sg[0].offset = 0;
109 memdesc->sg[0].dma_address = physaddr;
Jordan Croused17e9aa2011-10-12 16:57:48 -0600110 return 0;
111}
112
113static inline int
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700114kgsl_allocate(struct kgsl_memdesc *memdesc,
115 struct kgsl_pagetable *pagetable, size_t size)
116{
Jeremy Gebben32660362011-11-03 09:59:51 -0600117 if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
118 return kgsl_sharedmem_ebimem(memdesc, pagetable, size);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700119 return kgsl_sharedmem_vmalloc(memdesc, pagetable, size);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700120}
121
122static inline int
123kgsl_allocate_user(struct kgsl_memdesc *memdesc,
124 struct kgsl_pagetable *pagetable,
125 size_t size, unsigned int flags)
126{
Jeremy Gebben32660362011-11-03 09:59:51 -0600127 if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
128 return kgsl_sharedmem_ebimem_user(memdesc, pagetable, size,
129 flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700130 return kgsl_sharedmem_vmalloc_user(memdesc, pagetable, size, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700131}
132
133static inline int
134kgsl_allocate_contiguous(struct kgsl_memdesc *memdesc, size_t size)
135{
136 int ret = kgsl_sharedmem_alloc_coherent(memdesc, size);
Jeremy Gebben32660362011-11-03 09:59:51 -0600137 if (!ret && (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700138 memdesc->gpuaddr = memdesc->physaddr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700139 return ret;
140}
141
142#endif /* __KGSL_SHAREDMEM_H */