/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#ifndef __KGSL_SHAREDMEM_H
#define __KGSL_SHAREDMEM_H

#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include "kgsl_mmu.h"
#include <linux/kmemleak.h>

struct kgsl_device;
struct kgsl_process_private;

#define KGSL_CACHE_OP_INV       0x01
#define KGSL_CACHE_OP_FLUSH     0x02
#define KGSL_CACHE_OP_CLEAN     0x03

/** Set if the memdesc describes cached memory */
#define KGSL_MEMFLAGS_CACHED    0x00000001

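/*
 * Backend callbacks attached to a kgsl_memdesc: vmflags and vmfault support
 * user-space mappings of the buffer (VM flags and page-fault handling), and
 * free releases the backing memory.
 */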
struct kgsl_memdesc_ops {
	int (*vmflags)(struct kgsl_memdesc *);
	int (*vmfault)(struct kgsl_memdesc *, struct vm_area_struct *,
		       struct vm_fault *);
	void (*free)(struct kgsl_memdesc *memdesc);
};

extern struct kgsl_memdesc_ops kgsl_vmalloc_ops;

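/*
 * Allocation backends declared below: vmalloc-backed allocations mapped
 * through the supplied GPU pagetable, coherent (physically contiguous)
 * allocations, and EBI memory used when the target has no GPU MMU (see
 * kgsl_allocate() below). The *_user variants take extra mapping flags
 * for buffers that will be exposed to user space.
 */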
int kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
			   struct kgsl_pagetable *pagetable, size_t size);

int kgsl_sharedmem_vmalloc_user(struct kgsl_memdesc *memdesc,
				struct kgsl_pagetable *pagetable,
				size_t size, int flags);

int kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size);

int kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc,
			       struct kgsl_pagetable *pagetable,
			       size_t size, int flags);

int kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
			  struct kgsl_pagetable *pagetable,
			  size_t size);

void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc);

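/*
 * CPU-side helpers: kgsl_sharedmem_readl()/writel() transfer a single 32-bit
 * word at a byte offset into the buffer, kgsl_sharedmem_set() fills a byte
 * range with a value, and kgsl_cache_range_op() performs one of the
 * KGSL_CACHE_OP_* operations on the buffer.
 */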
int kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
			 uint32_t *dst,
			 unsigned int offsetbytes);

int kgsl_sharedmem_writel(const struct kgsl_memdesc *memdesc,
			  unsigned int offsetbytes,
			  uint32_t src);

int kgsl_sharedmem_set(const struct kgsl_memdesc *memdesc,
		       unsigned int offsetbytes, unsigned int value,
		       unsigned int sizebytes);

void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op);

void kgsl_process_init_sysfs(struct kgsl_process_private *private);
void kgsl_process_uninit_sysfs(struct kgsl_process_private *private);

int kgsl_sharedmem_init_sysfs(void);
void kgsl_sharedmem_uninit_sysfs(void);

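/*
 * Build a one-entry scatterlist describing a physically contiguous region
 * and attach it to the memdesc.
 */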
static inline int
memdesc_sg_phys(struct kgsl_memdesc *memdesc,
		unsigned int physaddr, unsigned int size)
{
	struct page *page = phys_to_page(physaddr);

	memdesc->sg = vmalloc(sizeof(struct scatterlist) * 1);
	if (memdesc->sg == NULL)
		return -ENOMEM;

	kmemleak_not_leak(memdesc->sg);

	memdesc->sglen = 1;
	sg_init_table(memdesc->sg, 1);
	sg_set_page(&memdesc->sg[0], page, size, 0);
	return 0;
}

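/*
 * Allocate general-purpose GPU memory: contiguous EBI memory when there is
 * no GPU MMU, otherwise vmalloc-backed memory mapped through the given
 * pagetable.
 */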
static inline int
kgsl_allocate(struct kgsl_memdesc *memdesc,
	      struct kgsl_pagetable *pagetable, size_t size)
{
	if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
		return kgsl_sharedmem_ebimem(memdesc, pagetable, size);
	return kgsl_sharedmem_vmalloc(memdesc, pagetable, size);
}

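/*
 * As kgsl_allocate(), but for buffers destined for user space; the caller's
 * mapping flags are passed through to the backend.
 */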
static inline int
kgsl_allocate_user(struct kgsl_memdesc *memdesc,
		   struct kgsl_pagetable *pagetable,
		   size_t size, unsigned int flags)
{
	if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
		return kgsl_sharedmem_ebimem_user(memdesc, pagetable, size,
						  flags);
	return kgsl_sharedmem_vmalloc_user(memdesc, pagetable, size, flags);
}

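/*
 * Allocate physically contiguous (coherent) memory. With no GPU MMU the
 * physical address is also the GPU address, so gpuaddr is set from physaddr
 * on success.
 */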
static inline int
kgsl_allocate_contiguous(struct kgsl_memdesc *memdesc, size_t size)
{
	int ret = kgsl_sharedmem_alloc_coherent(memdesc, size);
	if (!ret && (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE))
		memdesc->gpuaddr = memdesc->physaddr;
	return ret;
}


#endif /* __KGSL_SHAREDMEM_H */