/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#ifndef __KGSL_SHAREDMEM_H
#define __KGSL_SHAREDMEM_H

#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>

#include "kgsl_mmu.h"

struct kgsl_device;
struct kgsl_process_private;

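/*
 * Cache maintenance operations accepted by kgsl_cache_range_op(). In
 * the usual ARM sense of the terms: INV discards the CPU cache lines
 * backing the memdesc, CLEAN writes dirty lines back to memory, and
 * FLUSH is a clean followed by an invalidate.
 */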
#define KGSL_CACHE_OP_INV	0x01
#define KGSL_CACHE_OP_FLUSH	0x02
#define KGSL_CACHE_OP_CLEAN	0x03

/** Set if the memdesc is mapped into all pagetables */
#define KGSL_MEMFLAGS_GLOBAL	0x00000002

extern struct kgsl_memdesc_ops kgsl_page_alloc_ops;

int kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable, size_t size);

int kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			size_t size, int flags);

int kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size);

int kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			size_t size, int flags);

int kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			size_t size);

void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc);

int kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
			uint32_t *dst,
			unsigned int offsetbytes);

int kgsl_sharedmem_writel(const struct kgsl_memdesc *memdesc,
			unsigned int offsetbytes,
			uint32_t src);

int kgsl_sharedmem_set(const struct kgsl_memdesc *memdesc,
			unsigned int offsetbytes, unsigned int value,
			unsigned int sizebytes);

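/*
 * Illustrative sketch, not driver documentation: writing a word into a
 * shared buffer and reading it back. "memdesc" is assumed to be a
 * struct kgsl_memdesc that has already been allocated:
 *
 *	uint32_t val;
 *
 *	kgsl_sharedmem_writel(&memdesc, 0, 0xdeadbeef);
 *	if (kgsl_sharedmem_readl(&memdesc, &val, 0) == 0)
 *		pr_info("read back 0x%x\n", val);
 */
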
void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op);

void kgsl_process_init_sysfs(struct kgsl_process_private *private);
void kgsl_process_uninit_sysfs(struct kgsl_process_private *private);

int kgsl_sharedmem_init_sysfs(void);
void kgsl_sharedmem_uninit_sysfs(void);

static inline unsigned int kgsl_get_sg_pa(struct scatterlist *sg)
{
	/*
	 * Try sg_dma_address first to support ion carveout
	 * regions which do not work with sg_phys().
	 */
	unsigned int pa = sg_dma_address(sg);
	if (pa == 0)
		pa = sg_phys(sg);
	return pa;
}

int
kgsl_sharedmem_map_vma(struct vm_area_struct *vma,
			const struct kgsl_memdesc *memdesc);

/*
 * For relatively small sglists it is preferable to use kzalloc rather
 * than going down the vmalloc rat hole. If the size of the sglist is
 * less than PAGE_SIZE, use kzalloc; otherwise fall back to vmalloc.
 */

static inline void *kgsl_sg_alloc(unsigned int sglen)
{
	if ((sglen * sizeof(struct scatterlist)) < PAGE_SIZE)
		return kzalloc(sglen * sizeof(struct scatterlist), GFP_KERNEL);
	else
		return vmalloc(sglen * sizeof(struct scatterlist));
}

static inline void kgsl_sg_free(void *ptr, unsigned int sglen)
{
	if ((sglen * sizeof(struct scatterlist)) < PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}

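/*
 * Illustrative example: allocations made with kgsl_sg_alloc() must be
 * released with kgsl_sg_free() using the same length, since the length
 * is what selects between kfree() and vfree(). "nents" here is a
 * hypothetical entry count:
 *
 *	struct scatterlist *sg = kgsl_sg_alloc(nents);
 *
 *	if (sg == NULL)
 *		return -ENOMEM;
 *	sg_init_table(sg, nents);
 *	...
 *	kgsl_sg_free(sg, nents);
 */
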
static inline int
memdesc_sg_phys(struct kgsl_memdesc *memdesc,
		unsigned int physaddr, unsigned int size)
{
	memdesc->sg = kgsl_sg_alloc(1);
	if (!memdesc->sg)
		return -ENOMEM;

	kmemleak_not_leak(memdesc->sg);

	memdesc->sglen = 1;
	sg_init_table(memdesc->sg, 1);
	memdesc->sg[0].length = size;
	memdesc->sg[0].offset = 0;
	memdesc->sg[0].dma_address = physaddr;
	return 0;
}

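/*
 * Note that memdesc_sg_phys() stores the physical address in
 * dma_address rather than calling sg_set_page(); this is what lets
 * kgsl_get_sg_pa() above recover the address for carveout regions
 * that have no backing struct page.
 */
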
static inline int
kgsl_allocate(struct kgsl_memdesc *memdesc,
		struct kgsl_pagetable *pagetable, size_t size)
{
	if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
		return kgsl_sharedmem_ebimem(memdesc, pagetable, size);
	memdesc->priv |= (KGSL_MEMTYPE_KERNEL << KGSL_MEMTYPE_SHIFT);
	return kgsl_sharedmem_page_alloc(memdesc, pagetable, size);
}

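/*
 * Illustrative sketch of a kernel-side allocation through
 * kgsl_allocate(), which uses contiguous EBI memory when no MMU is
 * present and page allocations otherwise. "pagetable" and the size
 * are hypothetical:
 *
 *	struct kgsl_memdesc md = { 0 };
 *	int ret;
 *
 *	ret = kgsl_allocate(&md, pagetable, PAGE_SIZE);
 *	if (ret)
 *		return ret;
 *	...
 *	kgsl_sharedmem_free(&md);
 */
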
static inline int
kgsl_allocate_user(struct kgsl_memdesc *memdesc,
		struct kgsl_pagetable *pagetable,
		size_t size, unsigned int flags)
{
	int ret;
	unsigned int mask = (KGSL_MEMTYPE_MASK | KGSL_MEMFLAGS_GPUREADONLY);

	if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
		ret = kgsl_sharedmem_ebimem_user(memdesc, pagetable, size,
			flags);
	else
		ret = kgsl_sharedmem_page_alloc_user(memdesc, pagetable, size,
			flags);
	if (ret == 0)
		memdesc->priv |= flags & mask;
	return ret;
}

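/*
 * Only the flag bits covered by KGSL_MEMTYPE_MASK and
 * KGSL_MEMFLAGS_GPUREADONLY are latched into memdesc->priv by
 * kgsl_allocate_user(); the full flags word is still passed through
 * to the underlying allocator.
 */
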
static inline int
kgsl_allocate_contiguous(struct kgsl_memdesc *memdesc, size_t size)
{
	int ret = kgsl_sharedmem_alloc_coherent(memdesc, size);

	if (!ret && (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE))
		memdesc->gpuaddr = memdesc->physaddr;
	return ret;
}

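/*
 * With no MMU the GPU addresses memory physically, which is why the
 * coherent buffer's physical address doubles as its GPU address above.
 */
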
static inline int kgsl_sg_size(struct scatterlist *sg, int sglen)
{
	int i, size = 0;
	struct scatterlist *s;

	for_each_sg(sg, s, sglen, i) {
		size += s->length;
	}

	return size;
}
#endif /* __KGSL_SHAREDMEM_H */