/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#ifndef __KGSL_SHAREDMEM_H
#define __KGSL_SHAREDMEM_H

#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>

#include "kgsl_mmu.h"
#include "kgsl_log.h"

struct kgsl_device;
struct kgsl_process_private;

#define KGSL_CACHE_OP_INV	0x01
#define KGSL_CACHE_OP_FLUSH	0x02
#define KGSL_CACHE_OP_CLEAN	0x03

extern struct kgsl_memdesc_ops kgsl_page_alloc_ops;

int kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable, size_t size);

int kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			size_t size);

int kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size);

int kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			size_t size);

int kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			size_t size);

void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc);

int kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
			uint32_t *dst,
			unsigned int offsetbytes);

int kgsl_sharedmem_writel(const struct kgsl_memdesc *memdesc,
			unsigned int offsetbytes,
			uint32_t src);

int kgsl_sharedmem_set(const struct kgsl_memdesc *memdesc,
			unsigned int offsetbytes, unsigned int value,
			unsigned int sizebytes);

void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op);

void kgsl_process_init_sysfs(struct kgsl_process_private *private);
void kgsl_process_uninit_sysfs(struct kgsl_process_private *private);

int kgsl_sharedmem_init_sysfs(void);
void kgsl_sharedmem_uninit_sysfs(void);

/*
 * kgsl_memdesc_get_align - Get alignment flags from a memdesc
 * @memdesc - the memdesc
 *
 * Returns the alignment requested, as a power of 2 exponent.
 */
static inline int
kgsl_memdesc_get_align(const struct kgsl_memdesc *memdesc)
{
	return (memdesc->flags & KGSL_MEMALIGN_MASK) >> KGSL_MEMALIGN_SHIFT;
}

/*
 * kgsl_memdesc_set_align - Set alignment flags of a memdesc
 * @memdesc - the memdesc
 * @align - alignment requested, as a power of 2 exponent.
 *
 * Always returns 0; an @align larger than 32 is clamped to 32 (2^32 bytes).
 */
static inline int
kgsl_memdesc_set_align(struct kgsl_memdesc *memdesc, unsigned int align)
{
	if (align > 32) {
		KGSL_CORE_ERR("Alignment too big, restricting to 2^32\n");
		align = 32;
	}

	memdesc->flags &= ~KGSL_MEMALIGN_MASK;
	memdesc->flags |= (align << KGSL_MEMALIGN_SHIFT) & KGSL_MEMALIGN_MASK;
	return 0;
}
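
/*
 * Illustrative sketch (not part of the driver API): the alignment helpers
 * above work with a power-of-two exponent rather than a byte count, so a
 * hypothetical caller asking for 8 KB alignment would do something like:
 *
 *	kgsl_memdesc_set_align(&memdesc, 13);            /\* 2^13 = 8192 bytes *\/
 *	bytes = 1 << kgsl_memdesc_get_align(&memdesc);   /\* reads back 8192 *\/
 *
 * "memdesc" and "bytes" here are assumed local variables, not names defined
 * by this header.
 */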

/*
 * kgsl_get_sg_pa - Return the physical address backing a scatterlist entry
 * @sg - the scatterlist entry
 */
static inline unsigned int kgsl_get_sg_pa(struct scatterlist *sg)
{
	/*
	 * Try sg_dma_address first to support ion carveout
	 * regions which do not work with sg_phys().
	 */
	unsigned int pa = sg_dma_address(sg);
	if (pa == 0)
		pa = sg_phys(sg);
	return pa;
}
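
/*
 * Illustrative sketch (assumptions noted): for an entry filled in by
 * memdesc_sg_phys() further down, which stores the address in
 * sg->dma_address, that value comes straight back; for an entry set up
 * with sg_set_page() and never DMA-mapped, the helper falls back to
 * sg_phys().  A hypothetical caller might do:
 *
 *	unsigned int pa = kgsl_get_sg_pa(&memdesc->sg[0]);
 *
 * where "memdesc" is an assumed local variable.
 */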
114
Harsh Vardhan Dwivedi8cb835b2012-03-29 17:23:11 -0600115int
116kgsl_sharedmem_map_vma(struct vm_area_struct *vma,
117 const struct kgsl_memdesc *memdesc);
118
/*
 * For relatively small sglists, it is preferable to use kzalloc
 * rather than going down the vmalloc rat hole.  If the sglist
 * occupies less than PAGE_SIZE, use kzalloc; otherwise fall back
 * to vmalloc.
 */

static inline void *kgsl_sg_alloc(unsigned int sglen)
{
	if ((sglen * sizeof(struct scatterlist)) < PAGE_SIZE)
		return kzalloc(sglen * sizeof(struct scatterlist), GFP_KERNEL);
	else
		return vmalloc(sglen * sizeof(struct scatterlist));
}

static inline void kgsl_sg_free(void *ptr, unsigned int sglen)
{
	if ((sglen * sizeof(struct scatterlist)) < PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}
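
/*
 * Illustrative sketch (not part of the API): allocation and free must be
 * given the same element count so that both take the matching kzalloc/kfree
 * or vmalloc/vfree branch.  A hypothetical caller building a table of
 * "nents" entries would pair the two helpers like this:
 *
 *	struct scatterlist *sg = kgsl_sg_alloc(nents);
 *	if (sg == NULL)
 *		return -ENOMEM;
 *	sg_init_table(sg, nents);
 *	...
 *	kgsl_sg_free(sg, nents);
 *
 * "nents" is an assumed local variable, not a name defined in this header.
 */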

/*
 * memdesc_sg_phys - Build a single-entry scatterlist for a physical region
 * @memdesc - the memdesc to fill in
 * @physaddr - physical address of the region
 * @size - size of the region in bytes
 *
 * Returns 0 on success or -ENOMEM if the scatterlist cannot be allocated.
 */
static inline int
memdesc_sg_phys(struct kgsl_memdesc *memdesc,
		unsigned int physaddr, unsigned int size)
{
	memdesc->sg = kgsl_sg_alloc(1);
	if (!memdesc->sg)
		return -ENOMEM;

	kmemleak_not_leak(memdesc->sg);

	memdesc->sglen = 1;
	sg_init_table(memdesc->sg, 1);
	memdesc->sg[0].length = size;
	memdesc->sg[0].offset = 0;
	memdesc->sg[0].dma_address = physaddr;
	return 0;
}
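
/*
 * Illustrative sketch (assumed values): this helper only describes an
 * already existing contiguous region; it does not allocate or map anything
 * itself.  A hypothetical caller wrapping a 1 MB carveout would do:
 *
 *	ret = memdesc_sg_phys(&memdesc, phys_base, SZ_1M);
 *	if (ret)
 *		return ret;
 *
 * "memdesc", "phys_base" and "ret" are assumed local variables.
 */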

/*
 * kgsl_allocate - Allocate kernel-owned GPU memory
 *
 * Without an MMU the request is routed to kgsl_sharedmem_ebimem();
 * otherwise it is page based and the memdesc is tagged as kernel memory.
 */
static inline int
kgsl_allocate(struct kgsl_memdesc *memdesc,
		struct kgsl_pagetable *pagetable, size_t size)
{
	if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
		return kgsl_sharedmem_ebimem(memdesc, pagetable, size);
	memdesc->flags |= (KGSL_MEMTYPE_KERNEL << KGSL_MEMTYPE_SHIFT);
	return kgsl_sharedmem_page_alloc(memdesc, pagetable, size);
}

/*
 * kgsl_allocate_user - Allocate GPU memory on behalf of a user process
 *
 * The caller-supplied flags replace any existing flags in the memdesc.
 */
static inline int
kgsl_allocate_user(struct kgsl_memdesc *memdesc,
		struct kgsl_pagetable *pagetable,
		size_t size, unsigned int flags)
{
	int ret;

	memdesc->flags = flags;

	if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
		ret = kgsl_sharedmem_ebimem_user(memdesc, pagetable, size);
	else
		ret = kgsl_sharedmem_page_alloc_user(memdesc, pagetable, size);

	return ret;
}

/*
 * kgsl_allocate_contiguous - Allocate physically contiguous kernel memory
 *
 * When there is no MMU the physical address doubles as the GPU address.
 */
static inline int
kgsl_allocate_contiguous(struct kgsl_memdesc *memdesc, size_t size)
{
	int ret = kgsl_sharedmem_alloc_coherent(memdesc, size);

	if (!ret && (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE))
		memdesc->gpuaddr = memdesc->physaddr;

	memdesc->flags |= (KGSL_MEMTYPE_KERNEL << KGSL_MEMTYPE_SHIFT);
	return ret;
}

/*
 * kgsl_sg_size - Return the total length in bytes of a scatterlist
 */
static inline int kgsl_sg_size(struct scatterlist *sg, int sglen)
{
	int i, size = 0;
	struct scatterlist *s;

	for_each_sg(sg, s, sglen, i) {
		size += s->length;
	}

	return size;
}
#endif /* __KGSL_SHAREDMEM_H */