blob: ecf292ece106cad0f9e9ec5d82ccbf29a6d31c19 [file] [log] [blame]
Duy Truonge833aca2013-02-12 13:35:08 -08001/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#ifndef __KGSL_SHAREDMEM_H
14#define __KGSL_SHAREDMEM_H
15
Jordan Croused17e9aa2011-10-12 16:57:48 -060016#include <linux/slab.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070017#include <linux/dma-mapping.h>
Jeff Boody28afec42012-01-18 15:47:46 -070018#include <linux/vmalloc.h>
Jeremy Gebben32660362011-11-03 09:59:51 -060019#include "kgsl_mmu.h"
Anshuman Danieecd5202012-02-17 19:52:49 +053020#include <linux/slab.h>
21#include <linux/kmemleak.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070022
Jordan Crousedc67dfb2012-10-25 09:41:46 -060023#include "kgsl_log.h"
24
struct kgsl_device;
struct kgsl_process_private;

/* Cache maintenance operations accepted by kgsl_cache_range_op() */
#define KGSL_CACHE_OP_INV       0x01
#define KGSL_CACHE_OP_FLUSH     0x02
#define KGSL_CACHE_OP_CLEAN     0x03

/* memdesc ops for page-based (non-contiguous) allocations */
extern struct kgsl_memdesc_ops kgsl_page_alloc_ops;

/* Allocate kernel-mapped paged memory and map it into @pagetable */
int kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable, size_t size);

/* Allocate paged memory on behalf of a user process */
int kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			size_t size);

/* Allocate physically contiguous (DMA coherent) memory */
int kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size);

/* Allocate contiguous EBI memory on behalf of a user process */
int kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			size_t size);

/* Allocate kernel-mapped contiguous EBI memory */
int kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			size_t size);

/* Release memory and mappings owned by @memdesc */
void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc);

/* Read a 32-bit word from @memdesc at @offsetbytes into @dst */
int kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
			uint32_t *dst,
			unsigned int offsetbytes);

/* Write the 32-bit value @src into @memdesc at @offsetbytes */
int kgsl_sharedmem_writel(const struct kgsl_memdesc *memdesc,
			unsigned int offsetbytes,
			uint32_t src);

/* memset()-style fill of @sizebytes bytes at @offsetbytes with @value */
int kgsl_sharedmem_set(const struct kgsl_memdesc *memdesc,
			unsigned int offsetbytes, unsigned int value,
			unsigned int sizebytes);

/* Perform a KGSL_CACHE_OP_* cache maintenance operation on @memdesc */
void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op);

/* Per-process sysfs node creation/teardown */
void kgsl_process_init_sysfs(struct kgsl_process_private *private);
void kgsl_process_uninit_sysfs(struct kgsl_process_private *private);

/* Driver-wide sysfs node creation/teardown */
int kgsl_sharedmem_init_sysfs(void);
void kgsl_sharedmem_uninit_sysfs(void);
Jordan Crousedc67dfb2012-10-25 09:41:46 -060073/*
74 * kgsl_memdesc_get_align - Get alignment flags from a memdesc
75 * @memdesc - the memdesc
76 *
77 * Returns the alignment requested, as power of 2 exponent.
78 */
79static inline int
80kgsl_memdesc_get_align(const struct kgsl_memdesc *memdesc)
81{
82 return (memdesc->flags & KGSL_MEMALIGN_MASK) >> KGSL_MEMALIGN_SHIFT;
83}
84
85/*
86 * kgsl_memdesc_set_align - Set alignment flags of a memdesc
87 * @memdesc - the memdesc
88 * @align - alignment requested, as a power of 2 exponent.
89 */
90static inline int
91kgsl_memdesc_set_align(struct kgsl_memdesc *memdesc, unsigned int align)
92{
93 if (align > 32) {
94 KGSL_CORE_ERR("Alignment too big, restricting to 2^32\n");
95 align = 32;
96 }
97
98 memdesc->flags &= ~KGSL_MEMALIGN_MASK;
99 memdesc->flags |= (align << KGSL_MEMALIGN_SHIFT) & KGSL_MEMALIGN_MASK;
100 return 0;
101}
102
static inline unsigned int kgsl_get_sg_pa(struct scatterlist *sg)
{
	/*
	 * Give sg_dma_address() the first shot, since ion carveout
	 * regions do not work with sg_phys(); only fall back to
	 * sg_phys() when no DMA address has been recorded.
	 */
	unsigned int pa = sg_dma_address(sg);

	return pa ? pa : sg_phys(sg);
}
114
/* Map the pages described by @memdesc into the user VMA @vma */
int
kgsl_sharedmem_map_vma(struct vm_area_struct *vma,
			const struct kgsl_memdesc *memdesc);
118
Jordan Crousea652a072012-04-06 16:26:33 -0600119/*
120 * For relatively small sglists, it is preferable to use kzalloc
121 * rather than going down the vmalloc rat hole. If the size of
122 * the sglist is < PAGE_SIZE use kzalloc otherwise fallback to
123 * vmalloc
124 */
125
126static inline void *kgsl_sg_alloc(unsigned int sglen)
127{
128 if ((sglen * sizeof(struct scatterlist)) < PAGE_SIZE)
129 return kzalloc(sglen * sizeof(struct scatterlist), GFP_KERNEL);
Dhivya Subramanian24866ee2013-02-07 12:24:26 -0800130 else {
131 void *ptr = vmalloc(sglen * sizeof(struct scatterlist));
132 if (ptr)
133 memset(ptr, 0, sglen * sizeof(struct scatterlist));
134
135 return ptr;
136 }
Jordan Crousea652a072012-04-06 16:26:33 -0600137}
138
139static inline void kgsl_sg_free(void *ptr, unsigned int sglen)
140{
141 if ((sglen * sizeof(struct scatterlist)) < PAGE_SIZE)
142 kfree(ptr);
143 else
144 vfree(ptr);
145}
146
/*
 * memdesc_sg_phys - build a one-entry scatterlist for a contiguous region
 * @memdesc: the memdesc to attach the scatterlist to
 * @physaddr: physical start address of the region
 * @size: size of the region in bytes
 *
 * The entry is stored in dma_address rather than via sg_set_page();
 * presumably because carveout regions may lack struct pages (see
 * kgsl_get_sg_pa(), which reads dma_address first) -- confirm with callers.
 *
 * Returns 0 on success or -ENOMEM if the scatterlist can't be allocated.
 */
static inline int
memdesc_sg_phys(struct kgsl_memdesc *memdesc,
		unsigned int physaddr, unsigned int size)
{
	memdesc->sg = kgsl_sg_alloc(1);
	if (!memdesc->sg)
		return -ENOMEM;

	/* The sg array is freed via memdesc, not leaked */
	kmemleak_not_leak(memdesc->sg);

	memdesc->sglen = 1;
	/* sg_init_table() must run before the fields below are written */
	sg_init_table(memdesc->sg, 1);
	memdesc->sg[0].length = size;
	memdesc->sg[0].offset = 0;
	memdesc->sg[0].dma_address = physaddr;
	return 0;
}
164
Jeremy Gebben2aba0f32013-05-28 16:54:00 -0600165/*
166 * kgsl_memdesc_is_global - is this a globally mapped buffer?
167 * @memdesc: the memdesc
168 *
169 * Returns nonzero if this is a global mapping, 0 otherwise
170 */
171static inline int kgsl_memdesc_is_global(const struct kgsl_memdesc *memdesc)
172{
173 return (memdesc->priv & KGSL_MEMDESC_GLOBAL) != 0;
174}
175
Jordan Croused17e9aa2011-10-12 16:57:48 -0600176static inline int
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700177kgsl_allocate(struct kgsl_memdesc *memdesc,
178 struct kgsl_pagetable *pagetable, size_t size)
179{
Jeremy Gebben32660362011-11-03 09:59:51 -0600180 if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
181 return kgsl_sharedmem_ebimem(memdesc, pagetable, size);
Jordan Crousedc67dfb2012-10-25 09:41:46 -0600182 memdesc->flags |= (KGSL_MEMTYPE_KERNEL << KGSL_MEMTYPE_SHIFT);
Harsh Vardhan Dwivedif99c2632012-03-15 14:17:11 -0600183 return kgsl_sharedmem_page_alloc(memdesc, pagetable, size);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700184}
185
186static inline int
187kgsl_allocate_user(struct kgsl_memdesc *memdesc,
188 struct kgsl_pagetable *pagetable,
189 size_t size, unsigned int flags)
190{
Jeremy Gebben3e626ff2012-09-24 13:05:39 -0600191 int ret;
Jordan Crousedc67dfb2012-10-25 09:41:46 -0600192
193 memdesc->flags = flags;
194
Jeremy Gebben32660362011-11-03 09:59:51 -0600195 if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
Jordan Crousedc67dfb2012-10-25 09:41:46 -0600196 ret = kgsl_sharedmem_ebimem_user(memdesc, pagetable, size);
Jeremy Gebben3e626ff2012-09-24 13:05:39 -0600197 else
Jordan Crousedc67dfb2012-10-25 09:41:46 -0600198 ret = kgsl_sharedmem_page_alloc_user(memdesc, pagetable, size);
199
Jeremy Gebben3e626ff2012-09-24 13:05:39 -0600200 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700201}
202
203static inline int
204kgsl_allocate_contiguous(struct kgsl_memdesc *memdesc, size_t size)
205{
206 int ret = kgsl_sharedmem_alloc_coherent(memdesc, size);
Jeremy Gebben32660362011-11-03 09:59:51 -0600207 if (!ret && (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700208 memdesc->gpuaddr = memdesc->physaddr;
Jordan Crousedc67dfb2012-10-25 09:41:46 -0600209
210 memdesc->flags |= (KGSL_MEMTYPE_KERNEL << KGSL_MEMTYPE_SHIFT);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700211 return ret;
212}
213
Jordan Crouse3c86ca82012-05-21 08:41:52 -0600214static inline int kgsl_sg_size(struct scatterlist *sg, int sglen)
215{
216 int i, size = 0;
217 struct scatterlist *s;
218
219 for_each_sg(sg, s, sglen, i) {
220 size += s->length;
221 }
222
223 return size;
224}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700225#endif /* __KGSL_SHAREDMEM_H */