/*
 * drivers/gpu/ion/ion_priv.h
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#ifndef _ION_PRIV_H
#define _ION_PRIV_H

#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/ion.h>

struct ion_mapping;

struct ion_dma_mapping {
	struct kref ref;
	struct scatterlist *sglist;
};

struct ion_kernel_mapping {
	struct kref ref;
	void *vaddr;
};

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);

/**
 * struct ion_buffer - metadata for a particular buffer
 * @ref:	reference count
 * @node:	node in the ion_device buffers tree
 * @dev:	back pointer to the ion_device
 * @heap:	back pointer to the heap the buffer came from
 * @flags:	buffer specific flags
 * @size:	size of the buffer
 * @priv_virt:	private data to the buffer representable as
 *		a void *
 * @priv_phys:	private data to the buffer representable as
 *		an ion_phys_addr_t (and someday a phys_addr_t)
 * @lock:	protects the buffer's cnt fields
 * @kmap_cnt:	number of times the buffer is mapped to the kernel
 * @vaddr:	the kernel mapping if kmap_cnt is not zero
 * @dmap_cnt:	number of times the buffer is mapped for dma
 * @sglist:	the scatterlist for the buffer if dmap_cnt is not zero
 * @umap_cnt:	number of times the buffer is mapped to userspace
 * @marked:	debugging aid used to detect leaked buffers
 */
struct ion_buffer {
	struct kref ref;
	struct rb_node node;
	struct ion_device *dev;
	struct ion_heap *heap;
	unsigned long flags;
	size_t size;
	union {
		void *priv_virt;
		ion_phys_addr_t priv_phys;
	};
	struct mutex lock;
	int kmap_cnt;
	void *vaddr;
	int dmap_cnt;
	struct scatterlist *sglist;
	int umap_cnt;
	int marked;
};

/**
 * struct ion_heap_ops - ops to operate on a given heap
 * @allocate:		allocate memory
 * @free:		free memory
 * @phys:		get physical address of a buffer (only defined on
 *			physically contiguous heaps)
 * @map_dma:		map the memory for dma to a scatterlist
 * @unmap_dma:		unmap the memory for dma
 * @map_kernel:		map memory into the kernel
 * @unmap_kernel:	unmap memory from the kernel
 * @map_user:		map memory into userspace
 * @cache_op:		perform cache maintenance on a range of the buffer
 * @get_allocated:	return the number of bytes currently allocated
 *			from the heap
 * @get_total:		return the total size of the heap in bytes
 */
struct ion_heap_ops {
	int (*allocate) (struct ion_heap *heap,
			 struct ion_buffer *buffer, unsigned long len,
			 unsigned long align, unsigned long flags);
	void (*free) (struct ion_buffer *buffer);
	int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
		     ion_phys_addr_t *addr, size_t *len);
	struct scatterlist *(*map_dma) (struct ion_heap *heap,
					struct ion_buffer *buffer);
	void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
	void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer,
			      unsigned long flags);
	void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
	int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
			 struct vm_area_struct *vma, unsigned long flags);
	int (*cache_op)(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset,
			unsigned int length, unsigned int cmd);
	unsigned long (*get_allocated)(struct ion_heap *heap);
	unsigned long (*get_total)(struct ion_heap *heap);
};

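/*
 * Illustrative sketch (not part of this header's API) of how a physically
 * contiguous heap might back the phys op above: allocate() is assumed to
 * have stored the buffer's base address in priv_phys, so phys just reports
 * it back along with the size.  The function name is hypothetical.
 */
static inline int example_contig_heap_phys(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   ion_phys_addr_t *addr, size_t *len)
{
	/* base address stashed in the union at allocation time */
	*addr = buffer->priv_phys;
	*len = buffer->size;
	return 0;
}
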
/**
 * struct ion_heap - represents a heap in the system
 * @node:	rb node to put the heap on the device's tree of heaps
 * @dev:	back pointer to the ion_device
 * @type:	type of heap
 * @ops:	ops struct as above
 * @id:		id of heap, also indicates priority of this heap when
 *		allocating.  These are specified by platform data and
 *		MUST be unique
 * @name:	used for debugging
 *
 * Represents a pool of memory from which buffers can be made.  In some
 * systems the only heap is regular system memory allocated via vmalloc.
 * On others, some blocks might require large physically contiguous buffers
 * that are allocated from a specially reserved heap.
 */
struct ion_heap {
	struct rb_node node;
	struct ion_device *dev;
	enum ion_heap_type type;
	struct ion_heap_ops *ops;
	int id;
	const char *name;
};

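/*
 * Illustrative sketch of how the cnt fields documented on struct ion_buffer
 * are meant to be used, here for the kernel mapping: the lock serializes the
 * counters, the heap's map_kernel op runs only for the first mapper, and
 * later mappers reuse the cached vaddr.  The function name is hypothetical
 * and the error check assumes map_kernel returns NULL or an ERR_PTR on
 * failure (IS_ERR_OR_NULL comes from <linux/err.h>).
 */
static inline void *example_ion_buffer_kmap(struct ion_buffer *buffer,
					    unsigned long flags)
{
	void *vaddr;

	mutex_lock(&buffer->lock);
	if (!buffer->kmap_cnt) {
		vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer,
						      flags);
		if (IS_ERR_OR_NULL(vaddr)) {
			mutex_unlock(&buffer->lock);
			return vaddr;
		}
		buffer->vaddr = vaddr;
	}
	buffer->kmap_cnt++;
	vaddr = buffer->vaddr;
	mutex_unlock(&buffer->lock);
	return vaddr;
}
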
/**
 * ion_device_create - allocates and returns an ion device
 * @custom_ioctl:	arch specific ioctl function if applicable
 *
 * returns a valid device or an ERR_PTR() encoded error on failure
 */
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg));

/**
 * ion_device_destroy - frees a device and its resources
 * @dev:	the device
 */
void ion_device_destroy(struct ion_device *dev);

/**
 * ion_device_add_heap - adds a heap to the ion device
 * @dev:	the device
 * @heap:	the heap to add
 */
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);

/**
 * Functions for creating and destroying the built-in ion heaps.
 * Architectures can add their own custom architecture specific
 * heaps as appropriate.
 */

struct ion_heap *ion_heap_create(struct ion_platform_heap *);
void ion_heap_destroy(struct ion_heap *);

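/*
 * Typical bring-up flow, sketched for illustration only: a board file
 * creates the device, then creates a heap for each entry in its platform
 * data and adds it to the device.  The function name is hypothetical,
 * error handling is pared down to the essentials, and IS_ERR/PTR_ERR are
 * assumed available via <linux/err.h>.
 */
static inline int example_ion_probe(struct ion_platform_heap *pheap)
{
	struct ion_device *idev;
	struct ion_heap *heap;

	idev = ion_device_create(NULL);	/* no arch specific ioctl */
	if (IS_ERR(idev))
		return PTR_ERR(idev);

	heap = ion_heap_create(pheap);	/* dispatches on the heap's type */
	if (IS_ERR(heap)) {
		ion_device_destroy(idev);
		return PTR_ERR(heap);
	}

	ion_device_add_heap(idev, heap);
	return 0;
}
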
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
void ion_system_heap_destroy(struct ion_heap *);

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
void ion_system_contig_heap_destroy(struct ion_heap *);

struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
void ion_carveout_heap_destroy(struct ion_heap *);

/**
 * Kernel API to allocate/free from the carveout -- used when the carveout
 * is used to back an architecture specific custom heap.
 */
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
				      unsigned long align);
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size);

/**
 * The carveout heap returns physical addresses; since 0 may be a valid
 * physical address, this value is used to indicate that allocation failed.
 */
#define ION_CARVEOUT_ALLOCATE_FAIL -1
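
/*
 * Usage sketch for the carveout helpers above (illustrative only): the
 * allocate path of a custom heap backed by the carveout.  Note that the
 * failure check is against ION_CARVEOUT_ALLOCATE_FAIL rather than 0,
 * since 0 can be a valid physical address.  The function name is
 * hypothetical.
 */
static inline int example_carveout_alloc(struct ion_heap *heap,
					 struct ion_buffer *buffer,
					 unsigned long size,
					 unsigned long align)
{
	buffer->priv_phys = ion_carveout_allocate(heap, size, align);
	if (buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL)
		return -ENOMEM;
	buffer->size = size;
	return 0;
}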

#endif /* _ION_PRIV_H */