/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/slab.h>
#include <mach/ocmem_priv.h>

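/*
 * A client id is treated as valid if it lies in the range
 * [OCMEM_GRAPHICS, OCMEM_CLIENT_MAX).
 */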
static inline int check_id(int id)
{
	return (id < OCMEM_CLIENT_MAX && id >= OCMEM_GRAPHICS);
}

static struct ocmem_handle *generate_handle(void)
{
	struct ocmem_handle *handle = NULL;

	handle = kzalloc(sizeof(struct ocmem_handle), GFP_KERNEL);
	if (!handle) {
		pr_err("ocmem: Unable to generate buffer handle\n");
		return NULL;
	}
	mutex_init(&handle->handle_mutex);
	return handle;
}

static int free_handle(struct ocmem_handle *handle)
{
	if (!handle)
		return -EINVAL;

	mutex_destroy(&handle->handle_mutex);
	kfree(handle);
	return 0;
}

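/*
 * Tear down the allocation behind @buf for client @id: process_free() is
 * called under the handle mutex, and the handle itself is released only
 * if that succeeds.
 */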
static int __ocmem_free(int id, struct ocmem_buf *buf)
{
	int ret = 0;
	struct ocmem_handle *handle = buffer_to_handle(buf);

	if (!handle)
		return -EINVAL;

	mutex_lock(&handle->handle_mutex);
	ret = process_free(id, handle);
	mutex_unlock(&handle->handle_mutex);

	if (ret)
		return -EINVAL;

	free_handle(handle);
	return 0;
}

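/*
 * Common allocation path for all the public allocators below: create a
 * handle and pass the request to process_allocate(). @min and @max bound
 * the requested size, @step is presumably the granularity used between
 * them, and @block/@wait carry the behaviour each wrapper selects; the
 * exact semantics of these flags are defined by process_allocate().
 */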
static struct ocmem_buf *__ocmem_allocate_range(int id, unsigned long min,
		unsigned long max, unsigned long step, bool block, bool wait)
{
	struct ocmem_handle *handle = NULL;
	int ret = 0;

	handle = generate_handle();
	if (!handle) {
		pr_err("ocmem: Unable to generate handle\n");
		return NULL;
	}

	mutex_lock(&handle->handle_mutex);
	ret = process_allocate(id, handle, min, max, step, block, wait);
	mutex_unlock(&handle->handle_mutex);
	if (ret) {
		pr_err("ocmem allocation failed\n");
		free_handle(handle);
		return NULL;
	}
	return handle_to_buffer(handle);
}

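/*
 * Allocate exactly @size bytes of OCMEM for @client_id (min == max ==
 * size), with can_wait set and can_block cleared. @size must be at least
 * OCMEM_MIN_ALLOC and OCMEM_MIN_ALIGN aligned. Returns the buffer on
 * success or NULL on failure.
 */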
struct ocmem_buf *ocmem_allocate(int client_id, unsigned long size)
{
	bool can_block = false;
	bool can_wait = true;

	if (!check_id(client_id)) {
		pr_err("ocmem: Invalid client id: %d\n", client_id);
		return NULL;
	}

	if (size < OCMEM_MIN_ALLOC) {
		pr_err("ocmem: requested size %lx must be at least %x\n",
			size, OCMEM_MIN_ALLOC);
		return NULL;
	}

	if (!IS_ALIGNED(size, OCMEM_MIN_ALIGN)) {
		pr_err("ocmem: Invalid alignment, size must be %x aligned\n",
			OCMEM_MIN_ALIGN);
		return NULL;
	}

	return __ocmem_allocate_range(client_id, size, size,
			size, can_block, can_wait);
}

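/*
 * Like ocmem_allocate(), but with both can_block and can_wait cleared, so
 * the request is presumably rejected rather than waited on when it cannot
 * be satisfied immediately.
 */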
struct ocmem_buf *ocmem_allocate_nowait(int client_id, unsigned long size)
{
	bool can_block = false;
	bool can_wait = false;

	if (!check_id(client_id)) {
		pr_err("ocmem: Invalid client id: %d\n", client_id);
		return NULL;
	}

	if (size < OCMEM_MIN_ALLOC) {
		pr_err("ocmem: requested size %lx must be at least %x\n",
			size, OCMEM_MIN_ALLOC);
		return NULL;
	}

	if (!IS_ALIGNED(size, OCMEM_MIN_ALIGN)) {
		pr_err("ocmem: Invalid alignment, size must be %x aligned\n",
			OCMEM_MIN_ALIGN);
		return NULL;
	}

	return __ocmem_allocate_range(client_id, size, size,
			size, can_block, can_wait);
}

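/*
 * Range allocation: request at least @min bytes with a target of @goal,
 * stepping by @step. This is part of the asynchronous API (can_block is
 * set), so the client must have a notifier registered. @min must be at
 * least OCMEM_MIN_ALLOC and all three arguments must be OCMEM_MIN_ALIGN
 * aligned.
 */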
struct ocmem_buf *ocmem_allocate_range(int client_id, unsigned long min,
		unsigned long goal, unsigned long step)
{
	bool can_block = true;
	bool can_wait = false;

	if (!check_id(client_id)) {
		pr_err("ocmem: Invalid client id: %d\n", client_id);
		return NULL;
	}

	/* Asynchronous API requires notifier registration */
	if (!check_notifier(client_id)) {
		pr_err("ocmem: No notifier registered for client %d\n",
				client_id);
		return NULL;
	}

	if (min < OCMEM_MIN_ALLOC) {
		pr_err("ocmem: requested min size %lx must be at least %x\n",
			min, OCMEM_MIN_ALLOC);
		return NULL;
	}

	if (!IS_ALIGNED(min | goal | step, OCMEM_MIN_ALIGN)) {
		pr_err("ocmem: Invalid alignment, args must be %x aligned\n",
			OCMEM_MIN_ALIGN);
		return NULL;
	}

	return __ocmem_allocate_range(client_id, min, goal,
			step, can_block, can_wait);
}

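/*
 * Asynchronous single-size allocation: the common path is called with a
 * minimum of 0 and a goal of @size, with can_block set and can_wait
 * cleared. A notifier must be registered so the client can presumably be
 * informed about the allocation later on.
 */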
struct ocmem_buf *ocmem_allocate_nb(int client_id, unsigned long size)
{
	bool can_block = true;
	bool can_wait = false;

	if (!check_id(client_id)) {
		pr_err("ocmem: Invalid client id: %d\n", client_id);
		return NULL;
	}

	/* Asynchronous API requires notifier registration */
	if (!check_notifier(client_id)) {
		pr_err("ocmem: No notifier registered for client %d\n",
				client_id);
		return NULL;
	}

	if (size < OCMEM_MIN_ALLOC) {
		pr_err("ocmem: requested size %lx must be at least %x\n",
			size, OCMEM_MIN_ALLOC);
		return NULL;
	}

	if (!IS_ALIGNED(size, OCMEM_MIN_ALIGN)) {
		pr_err("ocmem: Invalid alignment, args must be %x aligned\n",
			OCMEM_MIN_ALIGN);
		return NULL;
	}

	return __ocmem_allocate_range(client_id, 0, size, size,
			can_block, can_wait);
}

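/*
 * Free a buffer previously returned by one of the allocators above.
 * Returns 0 on success and -EINVAL for a bad client id, a NULL buffer,
 * or a failure reported by the underlying free path.
 */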
int ocmem_free(int client_id, struct ocmem_buf *buffer)
{
	if (!check_id(client_id)) {
		pr_err("ocmem: Invalid client id: %d\n", client_id);
		return -EINVAL;
	}

	if (!buffer) {
		pr_err("ocmem: Invalid buffer\n");
		return -EINVAL;
	}

	return __ocmem_free(client_id, buffer);
}

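/*
 * Sanity-check a map list before it is handed to process_xfer(): the list
 * must hold between 1 and OCMEM_MAX_CHUNKS chunks, and every chunk needs
 * a DDR physical address and a size of at least MIN_CHUNK_SIZE. Returns 0
 * if the list is usable, -EINVAL otherwise.
 */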
int pre_validate_chunk_list(struct ocmem_map_list *list)
{
	int i = 0;
	struct ocmem_chunk *chunks;

	if (!list)
		return -EINVAL;

	if (list->num_chunks > OCMEM_MAX_CHUNKS || list->num_chunks == 0)
		return -EINVAL;

	chunks = list->chunks;

	if (!chunks)
		return -EINVAL;

	for (i = 0; i < list->num_chunks; i++) {
		if (!chunks[i].ddr_paddr ||
			chunks[i].size < MIN_CHUNK_SIZE)
			return -EINVAL;
	}
	return 0;
}

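/*
 * Transfer the listed DDR chunks into the client's OCMEM buffer
 * (process_xfer() with TO_OCMEM). Part of the asynchronous API, so a
 * registered notifier is required; the transfer runs under the handle
 * mutex.
 */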
int ocmem_map(int client_id, struct ocmem_buf *buffer,
			struct ocmem_map_list *list)
{
	int ret = 0;
	struct ocmem_handle *handle = NULL;

	if (!check_id(client_id)) {
		pr_err("ocmem: Invalid client id: %d\n", client_id);
		return -EINVAL;
	}

	/* Asynchronous API requires notifier registration */
	if (!check_notifier(client_id)) {
		pr_err("ocmem: No notifier registered for client %d\n",
				client_id);
		return -EINVAL;
	}

	if (!buffer) {
		pr_err("ocmem: Invalid buffer\n");
		return -EINVAL;
	}

	/* pre_validate_chunk_list() returns 0 when the list is usable */
	if (pre_validate_chunk_list(list))
		return -EINVAL;

	handle = buffer_to_handle(buffer);

	if (!handle)
		return -EINVAL;

	mutex_lock(&handle->handle_mutex);
	ret = process_xfer(client_id, handle, list, TO_OCMEM);
	mutex_unlock(&handle->handle_mutex);
	return ret;
}

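/*
 * Reverse of ocmem_map(): the same validation is applied and the chunk
 * list is handed to process_xfer() with TO_DDR to move data back out of
 * OCMEM.
 */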
int ocmem_unmap(int client_id, struct ocmem_buf *buffer,
			struct ocmem_map_list *list)
{
	int ret = 0;
	struct ocmem_handle *handle = NULL;

	if (!check_id(client_id)) {
		pr_err("ocmem: Invalid client id: %d\n", client_id);
		return -EINVAL;
	}

	/* Asynchronous API requires notifier registration */
	if (!check_notifier(client_id)) {
		pr_err("ocmem: No notifier registered for client %d\n",
				client_id);
		return -EINVAL;
	}

	if (!buffer) {
		pr_err("ocmem: Invalid buffer\n");
		return -EINVAL;
	}

	/* pre_validate_chunk_list() returns 0 when the list is usable */
	if (pre_validate_chunk_list(list))
		return -EINVAL;

	handle = buffer_to_handle(buffer);

	if (!handle)
		return -EINVAL;

	mutex_lock(&handle->handle_mutex);
	ret = process_xfer(client_id, handle, list, TO_DDR);
	mutex_unlock(&handle->handle_mutex);
	return ret;
}

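/*
 * Report the OCMEM quota configured for @client_id, as determined by
 * process_quota(). Returns 0 for an invalid client id.
 */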
unsigned long get_max_quota(int client_id)
{
	if (!check_id(client_id)) {
		pr_err("ocmem: Invalid client id: %d\n", client_id);
		return 0x0;
	}
	return process_quota(client_id);
}