/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#ifndef __KGSL_SHAREDMEM_H
#define __KGSL_SHAREDMEM_H

#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include "kgsl_mmu.h"

struct kgsl_device;
struct kgsl_process_private;

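/*
 * Cache maintenance operations accepted by kgsl_cache_range_op().
 * Presumably INV discards (invalidates) CPU cache lines, CLEAN writes
 * dirty lines back to memory, and FLUSH does both; the exact mapping to
 * the ARM cache API is implemented in kgsl_sharedmem.c.
 */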
#define KGSL_CACHE_OP_INV       0x01
#define KGSL_CACHE_OP_FLUSH     0x02
#define KGSL_CACHE_OP_CLEAN     0x03

/** Set if the memdesc describes cached memory */
#define KGSL_MEMFLAGS_CACHED    0x00000001

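/*
 * Per-allocation backend callbacks. Judging by the names: vmflags()
 * supplies extra vm_area_struct flags for a userspace mmap() of the
 * allocation, vmfault() satisfies page faults on that mapping, and
 * free() releases the backing memory. The definitive semantics live in
 * kgsl_sharedmem.c.
 */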
struct kgsl_memdesc_ops {
	int (*vmflags)(struct kgsl_memdesc *);
	int (*vmfault)(struct kgsl_memdesc *, struct vm_area_struct *,
		       struct vm_fault *);
	void (*free)(struct kgsl_memdesc *memdesc);
};

extern struct kgsl_memdesc_ops kgsl_vmalloc_ops;

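/*
 * Backend allocators. As the names suggest, the vmalloc variants build a
 * page-based allocation that is mapped through the given GPU pagetable,
 * alloc_coherent uses the DMA coherent allocator, and the ebimem variants
 * provide physically contiguous memory for targets without a GPU MMU.
 * The *_user variants presumably honor per-allocation flags (e.g.
 * KGSL_MEMFLAGS_CACHED); see kgsl_sharedmem.c for the actual behavior.
 */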
int kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
			   struct kgsl_pagetable *pagetable, size_t size);

int kgsl_sharedmem_vmalloc_user(struct kgsl_memdesc *memdesc,
				struct kgsl_pagetable *pagetable,
				size_t size, int flags);

int kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size);

int kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc,
			       struct kgsl_pagetable *pagetable,
			       size_t size, int flags);

int kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
			  struct kgsl_pagetable *pagetable,
			  size_t size);

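/*
 * Tears down a memdesc created by any of the allocators above, unmapping
 * it from the GPU pagetable (when mapped) and releasing the backing memory.
 */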
void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc);

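/*
 * CPU-side accessors: readl/writel transfer a single 32-bit word at a byte
 * offset into the allocation, and set fills a byte range with a value.
 * offsetbytes is relative to the start of the memdesc.
 */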
int kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
			 uint32_t *dst,
			 unsigned int offsetbytes);

int kgsl_sharedmem_writel(const struct kgsl_memdesc *memdesc,
			  unsigned int offsetbytes,
			  uint32_t src);

int kgsl_sharedmem_set(const struct kgsl_memdesc *memdesc,
		       unsigned int offsetbytes, unsigned int value,
		       unsigned int sizebytes);

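/* Applies one of the KGSL_CACHE_OP_* operations to the memdesc's memory. */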
void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op);

void kgsl_process_init_sysfs(struct kgsl_process_private *private);
void kgsl_process_uninit_sysfs(struct kgsl_process_private *private);

int kgsl_sharedmem_init_sysfs(void);
void kgsl_sharedmem_uninit_sysfs(void);

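/*
 * memdesc_sg_phys - build a one-entry scatterlist describing a physically
 * contiguous region starting at @physaddr and spanning @size bytes.
 * Returns 0 on success or -ENOMEM if the scatterlist cannot be allocated.
 */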
static inline int
memdesc_sg_phys(struct kgsl_memdesc *memdesc,
		unsigned int physaddr, unsigned int size)
{
	struct page *page = phys_to_page(physaddr);

	memdesc->sg = vmalloc(sizeof(struct scatterlist) * 1);
	if (memdesc->sg == NULL)
		return -ENOMEM;

	memdesc->sglen = 1;
	sg_init_table(memdesc->sg, 1);
	sg_set_page(&memdesc->sg[0], page, size, 0);
	return 0;
}

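/*
 * Convenience wrapper for kernel-side buffers: when there is no GPU MMU
 * the allocation must be physically contiguous (ebimem), otherwise
 * vmalloc-backed pages are used and mapped through @pagetable.
 */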
static inline int
kgsl_allocate(struct kgsl_memdesc *memdesc,
	      struct kgsl_pagetable *pagetable, size_t size)
{
	if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
		return kgsl_sharedmem_ebimem(memdesc, pagetable, size);
	return kgsl_sharedmem_vmalloc(memdesc, pagetable, size);
}

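/*
 * Same MMU-type dispatch as kgsl_allocate(), for buffers allocated on
 * behalf of userspace; @flags is passed through to the backend.
 */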
static inline int
kgsl_allocate_user(struct kgsl_memdesc *memdesc,
		   struct kgsl_pagetable *pagetable,
		   size_t size, unsigned int flags)
{
	if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
		return kgsl_sharedmem_ebimem_user(memdesc, pagetable, size,
						  flags);
	return kgsl_sharedmem_vmalloc_user(memdesc, pagetable, size, flags);
}

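/*
 * Allocates DMA-coherent (physically contiguous) memory. With no GPU MMU
 * the GPU address is simply the physical address, so gpuaddr is aliased to
 * physaddr here; otherwise gpuaddr is left for the mapping code to fill in.
 */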
static inline int
kgsl_allocate_contiguous(struct kgsl_memdesc *memdesc, size_t size)
{
	int ret = kgsl_sharedmem_alloc_coherent(memdesc, size);
	if (!ret && (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE))
		memdesc->gpuaddr = memdesc->physaddr;
	return ret;
}

#endif /* __KGSL_SHAREDMEM_H */