/* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/fb.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/android_pmem.h>
#include <linux/vmalloc.h>
#include <linux/pm_runtime.h>
#include <linux/genlock.h>
#include <linux/rbtree.h>
#include <linux/ashmem.h>
#include <linux/major.h>
#include <linux/msm_ion.h>
#include <linux/io.h>
#include <mach/socinfo.h>
#include <linux/mman.h>

#include "kgsl.h"
#include "kgsl_debugfs.h"
#include "kgsl_cffdump.h"
#include "kgsl_log.h"
#include "kgsl_sharedmem.h"
#include "kgsl_device.h"
#include "kgsl_trace.h"
#include "kgsl_sync.h"

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "kgsl."

static int kgsl_pagetable_count = KGSL_PAGETABLE_COUNT;
static char *ksgl_mmu_type;
module_param_named(ptcount, kgsl_pagetable_count, int, 0);
MODULE_PARM_DESC(kgsl_pagetable_count,
"Minimum number of pagetables for KGSL to allocate at initialization time");
module_param_named(mmutype, ksgl_mmu_type, charp, 0);
MODULE_PARM_DESC(ksgl_mmu_type,
"Type of MMU to be used for graphics. Valid values are 'iommu' or 'gpummu' or 'nommu'");

static struct ion_client *kgsl_ion_client;

/**
 * kgsl_get_mem_entry - get the mem_entry structure for the specified object
 * @device: Pointer to the device structure
 * @ptbase: the pagetable base of the object
 * @gpuaddr: the GPU address of the object
 * @size: Size of the region to search
 */
struct kgsl_mem_entry *kgsl_get_mem_entry(struct kgsl_device *device,
	unsigned int ptbase, unsigned int gpuaddr, unsigned int size)
{
	struct kgsl_process_private *priv;
	struct kgsl_mem_entry *entry;

	mutex_lock(&kgsl_driver.process_mutex);

	list_for_each_entry(priv, &kgsl_driver.process_list, list) {
		if (!kgsl_mmu_pt_equal(&device->mmu, priv->pagetable, ptbase))
			continue;
		spin_lock(&priv->mem_lock);
		entry = kgsl_sharedmem_find_region(priv, gpuaddr, size);

		if (entry) {
			spin_unlock(&priv->mem_lock);
			mutex_unlock(&kgsl_driver.process_mutex);
			return entry;
		}
		spin_unlock(&priv->mem_lock);
	}
	mutex_unlock(&kgsl_driver.process_mutex);

	return NULL;
}
EXPORT_SYMBOL(kgsl_get_mem_entry);

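/**
 * kgsl_mem_entry_create - allocate and initialize a new mem_entry
 *
 * Returns a zeroed entry with its refcount initialized to 1, or NULL
 * if the allocation failed.
 */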
static inline struct kgsl_mem_entry *
kgsl_mem_entry_create(void)
{
	struct kgsl_mem_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	if (!entry)
		KGSL_CORE_ERR("kzalloc(%zu) failed\n", sizeof(*entry));
	else
		kref_init(&entry->refcount);

	return entry;
}

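/**
 * kgsl_mem_entry_destroy - release callback for a mem_entry's kref
 * @kref: the kref embedded in the entry being freed
 *
 * Runs when the last reference to the entry is dropped; releases the
 * backing memory according to its memtype and frees the entry.
 */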
void
kgsl_mem_entry_destroy(struct kref *kref)
{
	struct kgsl_mem_entry *entry = container_of(kref,
						struct kgsl_mem_entry,
						refcount);

	if (entry->memtype != KGSL_MEM_ENTRY_KERNEL)
		kgsl_driver.stats.mapped -= entry->memdesc.size;

	/*
	 * Ion takes care of freeing the sglist for us so
	 * clear the sg before freeing the sharedmem so kgsl_sharedmem_free
	 * doesn't try to free it again
	 */
	if (entry->memtype == KGSL_MEM_ENTRY_ION)
		entry->memdesc.sg = NULL;

	kgsl_sharedmem_free(&entry->memdesc);

	switch (entry->memtype) {
	case KGSL_MEM_ENTRY_PMEM:
	case KGSL_MEM_ENTRY_ASHMEM:
		if (entry->priv_data)
			fput(entry->priv_data);
		break;
	case KGSL_MEM_ENTRY_ION:
		ion_free(kgsl_ion_client, entry->priv_data);
		break;
	}

	kfree(entry);
}
EXPORT_SYMBOL(kgsl_mem_entry_destroy);

/**
 * kgsl_mem_entry_track_gpuaddr - Insert a mem_entry in the address tree
 * @process: the process that owns the memory
 * @entry: the memory entry
 *
 * Insert a kgsl_mem_entry into the rb_tree for searching by GPU address.
 * Not all mem_entries will have gpu addresses when first created, so this
 * function may be called after creation when the GPU address is finally
 * assigned.
 */
static void
kgsl_mem_entry_track_gpuaddr(struct kgsl_process_private *process,
				struct kgsl_mem_entry *entry)
{
	struct rb_node **node;
	struct rb_node *parent = NULL;

	spin_lock(&process->mem_lock);

	node = &process->mem_rb.rb_node;

	while (*node) {
		struct kgsl_mem_entry *cur;

		parent = *node;
		cur = rb_entry(parent, struct kgsl_mem_entry, node);

		if (entry->memdesc.gpuaddr < cur->memdesc.gpuaddr)
			node = &parent->rb_left;
		else
			node = &parent->rb_right;
	}

	rb_link_node(&entry->node, parent, node);
	rb_insert_color(&entry->node, &process->mem_rb);

	spin_unlock(&process->mem_lock);
}

/**
 * kgsl_mem_entry_attach_process - Attach a mem_entry to its owner process
 * @entry: the memory entry
 * @process: the owner process
 *
 * Attach a newly created mem_entry to its owner process so that
 * it can be found later. The mem_entry will be added to mem_idr and have
 * its 'id' field assigned. If the GPU address has been set, the entry
 * will also be added to the mem_rb tree.
 *
 * @returns - 0 on success or error code on failure.
 */
static int
kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry,
				struct kgsl_process_private *process)
{
	int ret;

	while (1) {
		if (idr_pre_get(&process->mem_idr, GFP_KERNEL) == 0) {
			ret = -ENOMEM;
			goto err;
		}

		spin_lock(&process->mem_lock);
		ret = idr_get_new_above(&process->mem_idr, entry, 1,
					&entry->id);
		spin_unlock(&process->mem_lock);

		if (ret == 0)
			break;
		else if (ret != -EAGAIN)
			goto err;
	}
	entry->priv = process;

	if (entry->memdesc.gpuaddr != 0)
		kgsl_mem_entry_track_gpuaddr(process, entry);
err:
	return ret;
}

/* Detach a memory entry from a process and unmap it from the MMU */
static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry)
{
	if (entry == NULL)
		return;

	spin_lock(&entry->priv->mem_lock);

	if (entry->id != 0)
		idr_remove(&entry->priv->mem_idr, entry->id);
	entry->id = 0;

	if (entry->memdesc.gpuaddr != 0)
		rb_erase(&entry->node, &entry->priv->mem_rb);

	spin_unlock(&entry->priv->mem_lock);

	entry->priv->stats[entry->memtype].cur -= entry->memdesc.size;
	entry->priv = NULL;

	kgsl_mmu_unmap(entry->memdesc.pagetable, &entry->memdesc);

	kgsl_mem_entry_put(entry);
}

/* Allocate a new context id */
static struct kgsl_context *
kgsl_create_context(struct kgsl_device_private *dev_priv)
{
	struct kgsl_context *context;
	int ret, id;

	context = kzalloc(sizeof(*context), GFP_KERNEL);

	if (context == NULL)
		return NULL;

	while (1) {
		if (idr_pre_get(&dev_priv->device->context_idr,
				GFP_KERNEL) == 0) {
			kfree(context);
			return NULL;
		}

		ret = idr_get_new_above(&dev_priv->device->context_idr,
					context, 1, &id);

		if (ret != -EAGAIN)
			break;
	}

	if (ret) {
		kfree(context);
		return NULL;
	}

	/* MAX - 1, there is one memdesc in memstore for device info */
	if (id >= KGSL_MEMSTORE_MAX) {
		KGSL_DRV_ERR(dev_priv->device, "cannot have more than %d "
			"ctxts due to memstore limitation\n",
			KGSL_MEMSTORE_MAX);
		idr_remove(&dev_priv->device->context_idr, id);
		kfree(context);
		return NULL;
	}

	kref_init(&context->refcount);
	context->id = id;
	context->dev_priv = dev_priv;

	ret = kgsl_sync_timeline_create(context);
	if (ret) {
		idr_remove(&dev_priv->device->context_idr, id);
		goto func_end;
	}

	/* Initialize the pending event list */
	INIT_LIST_HEAD(&context->events);

	/*
	 * Initialize the node that is used to maintain the master list of
	 * contexts with pending events in the device structure. Normally we
	 * wouldn't take the time to initialize a node but at event add time we
	 * call list_empty() on the node as a quick way of determining if the
	 * context is already in the master list so it needs to always be either
	 * active or in an unused but initialized state
	 */
	INIT_LIST_HEAD(&context->events_list);

func_end:
	if (ret) {
		kfree(context);
		return NULL;
	}

	return context;
}

/**
 * kgsl_context_detach - Release the "master" context reference
 * @context: The context that will be detached
 *
 * This is called when a context becomes unusable, because userspace
 * has requested for it to be destroyed. The context itself may
 * exist a bit longer until its reference count goes to zero.
 * Other code referencing the context can detect that it has been
 * detached because the context id will be set to KGSL_CONTEXT_INVALID.
 */
void
kgsl_context_detach(struct kgsl_context *context)
{
	int id;
	struct kgsl_device *device;
	if (context == NULL)
		return;
	device = context->dev_priv->device;
	trace_kgsl_context_detach(device, context);
	id = context->id;

	if (device->ftbl->drawctxt_destroy)
		device->ftbl->drawctxt_destroy(device, context);
	/* device specific drawctxt_destroy MUST clean up devctxt */
	BUG_ON(context->devctxt);
	/*
	 * Cancel events after the device-specific context is
	 * destroyed, to avoid possibly freeing memory while
	 * it is still in use by the GPU.
	 */
	kgsl_cancel_events_ctxt(device, context);
	idr_remove(&device->context_idr, id);
	context->id = KGSL_CONTEXT_INVALID;
	kgsl_context_put(context);
}

358kgsl_context_destroy(struct kref *kref)
359{
360 struct kgsl_context *context = container_of(kref, struct kgsl_context,
361 refcount);
Jeff Boodyfe6c39c2012-08-09 13:54:50 -0600362 kgsl_sync_timeline_destroy(context);
Jeremy Gebben9ad86922012-05-08 15:33:23 -0600363 kfree(context);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700364}
365
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700366static void kgsl_check_idle_locked(struct kgsl_device *device)
367{
368 if (device->pwrctrl.nap_allowed == true &&
369 device->state == KGSL_STATE_ACTIVE &&
370 device->requested_state == KGSL_STATE_NONE) {
Jeremy Gebben388c2972011-12-16 09:05:07 -0700371 kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
Lucille Sylvester721f7e72012-08-21 16:31:26 -0600372 kgsl_pwrscale_idle(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700373 if (kgsl_pwrctrl_sleep(device) != 0)
374 mod_timer(&device->idle_timer,
375 jiffies +
376 device->pwrctrl.interval_timeout);
377 }
378}
379
380static void kgsl_check_idle(struct kgsl_device *device)
381{
382 mutex_lock(&device->mutex);
383 kgsl_check_idle_locked(device);
384 mutex_unlock(&device->mutex);
385}
386
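/* Look up a device by its enumerated device id */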
struct kgsl_device *kgsl_get_device(int dev_idx)
{
	int i;
	struct kgsl_device *ret = NULL;

	mutex_lock(&kgsl_driver.devlock);

	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		if (kgsl_driver.devp[i] && kgsl_driver.devp[i]->id == dev_idx) {
			ret = kgsl_driver.devp[i];
			break;
		}
	}

	mutex_unlock(&kgsl_driver.devlock);
	return ret;
}
EXPORT_SYMBOL(kgsl_get_device);

static struct kgsl_device *kgsl_get_minor(int minor)
{
	struct kgsl_device *ret = NULL;

	if (minor < 0 || minor >= KGSL_DEVICE_MAX)
		return NULL;

	mutex_lock(&kgsl_driver.devlock);
	ret = kgsl_driver.devp[minor];
	mutex_unlock(&kgsl_driver.devlock);

	return ret;
}

int kgsl_check_timestamp(struct kgsl_device *device,
	struct kgsl_context *context, unsigned int timestamp)
{
	unsigned int ts_processed;

	ts_processed = kgsl_readtimestamp(device, context,
					KGSL_TIMESTAMP_RETIRED);

	return (timestamp_cmp(ts_processed, timestamp) >= 0);
}
EXPORT_SYMBOL(kgsl_check_timestamp);

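/*
 * Move the device into the SUSPEND state, waiting for active users
 * to finish and the hardware to go idle before stopping it.
 */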
static int kgsl_suspend_device(struct kgsl_device *device, pm_message_t state)
{
	int status = -EINVAL;
	unsigned int nap_allowed_saved;
	struct kgsl_pwrscale_policy *policy_saved;

	if (!device)
		return -EINVAL;

	KGSL_PWR_WARN(device, "suspend start\n");

	mutex_lock(&device->mutex);
	nap_allowed_saved = device->pwrctrl.nap_allowed;
	device->pwrctrl.nap_allowed = false;
	policy_saved = device->pwrscale.policy;
	device->pwrscale.policy = NULL;
	kgsl_pwrctrl_request_state(device, KGSL_STATE_SUSPEND);
	/* Make sure no user process is waiting for a timestamp
	 * before suspending */
	if (device->state == KGSL_STATE_ACTIVE && device->active_cnt != 0) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->suspend_gate);
		mutex_lock(&device->mutex);
	}
	/* Don't let the timer wake us during suspended sleep. */
	del_timer_sync(&device->idle_timer);
	switch (device->state) {
	case KGSL_STATE_INIT:
		break;
	case KGSL_STATE_ACTIVE:
		/* Wait for the device to become idle */
		device->ftbl->idle(device);
		/* fall through */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
		/* Get the completion ready to be waited upon. */
		INIT_COMPLETION(device->hwaccess_gate);
		device->ftbl->suspend_context(device);
		device->ftbl->stop(device);
		pm_qos_update_request(&device->pm_qos_req_dma,
					PM_QOS_DEFAULT_VALUE);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SUSPEND);
		break;
	case KGSL_STATE_SLUMBER:
		INIT_COMPLETION(device->hwaccess_gate);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SUSPEND);
		break;
	default:
		KGSL_PWR_ERR(device, "suspend fail, device %d\n",
			device->id);
		goto end;
	}
	kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
	device->pwrctrl.nap_allowed = nap_allowed_saved;
	device->pwrscale.policy = policy_saved;
	status = 0;

end:
	mutex_unlock(&device->mutex);
	KGSL_PWR_WARN(device, "suspend end\n");
	return status;
}

static int kgsl_resume_device(struct kgsl_device *device)
{
	int status = -EINVAL;

	if (!device)
		return -EINVAL;

	KGSL_PWR_WARN(device, "resume start\n");
	mutex_lock(&device->mutex);
	if (device->state == KGSL_STATE_SUSPEND) {
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
		status = 0;
		complete_all(&device->hwaccess_gate);
	}
	kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);

	mutex_unlock(&device->mutex);
	KGSL_PWR_WARN(device, "resume end\n");
	return status;
}

static int kgsl_suspend(struct device *dev)
{
	pm_message_t arg = {0};
	struct kgsl_device *device = dev_get_drvdata(dev);
	return kgsl_suspend_device(device, arg);
}

static int kgsl_resume(struct device *dev)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	return kgsl_resume_device(device);
}

static int kgsl_runtime_suspend(struct device *dev)
{
	return 0;
}

static int kgsl_runtime_resume(struct device *dev)
{
	return 0;
}

const struct dev_pm_ops kgsl_pm_ops = {
	.suspend = kgsl_suspend,
	.resume = kgsl_resume,
	.runtime_suspend = kgsl_runtime_suspend,
	.runtime_resume = kgsl_runtime_resume,
};
EXPORT_SYMBOL(kgsl_pm_ops);

void kgsl_early_suspend_driver(struct early_suspend *h)
{
	struct kgsl_device *device = container_of(h,
					struct kgsl_device, display_off);
	KGSL_PWR_WARN(device, "early suspend start\n");
	mutex_lock(&device->mutex);
	device->pwrctrl.restore_slumber = true;
	kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
	kgsl_pwrctrl_sleep(device);
	mutex_unlock(&device->mutex);
	KGSL_PWR_WARN(device, "early suspend end\n");
}
EXPORT_SYMBOL(kgsl_early_suspend_driver);

int kgsl_suspend_driver(struct platform_device *pdev,
			pm_message_t state)
{
	struct kgsl_device *device = dev_get_drvdata(&pdev->dev);
	return kgsl_suspend_device(device, state);
}
EXPORT_SYMBOL(kgsl_suspend_driver);

int kgsl_resume_driver(struct platform_device *pdev)
{
	struct kgsl_device *device = dev_get_drvdata(&pdev->dev);
	return kgsl_resume_device(device);
}
EXPORT_SYMBOL(kgsl_resume_driver);

void kgsl_late_resume_driver(struct early_suspend *h)
{
	struct kgsl_device *device = container_of(h,
					struct kgsl_device, display_off);
	KGSL_PWR_WARN(device, "late resume start\n");
	mutex_lock(&device->mutex);
	device->pwrctrl.restore_slumber = false;
	if (device->pwrscale.policy == NULL)
		kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_TURBO);
	kgsl_pwrctrl_wake(device);
	mutex_unlock(&device->mutex);
	kgsl_check_idle(device);
	KGSL_PWR_WARN(device, "late resume end\n");
}
EXPORT_SYMBOL(kgsl_late_resume_driver);

/* file operations */
static struct kgsl_process_private *
kgsl_get_process_private(struct kgsl_device_private *cur_dev_priv)
{
	struct kgsl_process_private *private;

	mutex_lock(&kgsl_driver.process_mutex);
	list_for_each_entry(private, &kgsl_driver.process_list, list) {
		if (private->pid == task_tgid_nr(current)) {
			private->refcnt++;
			goto out;
		}
	}

	/* no existing process private found for this dev_priv, create one */
	private = kzalloc(sizeof(struct kgsl_process_private), GFP_KERNEL);
	if (private == NULL) {
		KGSL_DRV_ERR(cur_dev_priv->device, "kzalloc(%zu) failed\n",
			sizeof(struct kgsl_process_private));
		goto out;
	}

	spin_lock_init(&private->mem_lock);
	private->refcnt = 1;
	private->pid = task_tgid_nr(current);
	private->mem_rb = RB_ROOT;

	idr_init(&private->mem_idr);

	if (kgsl_mmu_enabled()) {
		unsigned long pt_name;

		pt_name = task_tgid_nr(current);
		private->pagetable = kgsl_mmu_getpagetable(pt_name);
		if (private->pagetable == NULL) {
			kfree(private);
			private = NULL;
			goto out;
		}
	}

	list_add(&private->list, &kgsl_driver.process_list);

	kgsl_process_init_sysfs(private);
	kgsl_process_init_debugfs(private);

out:
	mutex_unlock(&kgsl_driver.process_mutex);
	return private;
}

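/*
 * Drop a reference on the per-process private data. The last put
 * detaches all remaining memory entries and releases the pagetable.
 */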
static void
kgsl_put_process_private(struct kgsl_device *device,
			struct kgsl_process_private *private)
{
	struct kgsl_mem_entry *entry = NULL;
	int next = 0;

	if (!private)
		return;

	mutex_lock(&kgsl_driver.process_mutex);

	if (--private->refcnt)
		goto unlock;

	kgsl_process_uninit_sysfs(private);
	debugfs_remove_recursive(private->debug_root);

	list_del(&private->list);

	while (1) {
		rcu_read_lock();
		entry = idr_get_next(&private->mem_idr, &next);
		rcu_read_unlock();
		if (entry == NULL)
			break;
		kgsl_mem_entry_detach_process(entry);
		/*
		 * Always start back at the beginning, to
		 * ensure all entries are removed,
		 * like list_for_each_entry_safe.
		 */
		next = 0;
	}
	kgsl_mmu_putpagetable(private->pagetable);
	idr_destroy(&private->mem_idr);
	kfree(private);
unlock:
	mutex_unlock(&kgsl_driver.process_mutex);
}

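/* fops release: tear down the per-fd state when userspace closes us */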
static int kgsl_release(struct inode *inodep, struct file *filep)
{
	int result = 0;
	struct kgsl_device_private *dev_priv = filep->private_data;
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_device *device = dev_priv->device;
	struct kgsl_context *context;
	int next = 0;

	filep->private_data = NULL;

	mutex_lock(&device->mutex);
	kgsl_check_suspended(device);

	while (1) {
		context = idr_get_next(&device->context_idr, &next);
		if (context == NULL)
			break;

		if (context->dev_priv == dev_priv)
			kgsl_context_detach(context);

		next = next + 1;
	}
	/*
	 * Clean up any to-be-freed entries that belong to this
	 * process and this device. This is done after the contexts
	 * are destroyed to avoid possibly freeing memory while
	 * it is still in use by the GPU.
	 */
	kgsl_cancel_events(device, dev_priv);

	device->open_count--;
	if (device->open_count == 0) {
		result = device->ftbl->stop(device);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
	}

	mutex_unlock(&device->mutex);
	kfree(dev_priv);

	kgsl_put_process_private(device, private);

	pm_runtime_put(device->parentdev);
	return result;
}

static int kgsl_open(struct inode *inodep, struct file *filep)
{
	int result;
	struct kgsl_device_private *dev_priv;
	struct kgsl_device *device;
	unsigned int minor = iminor(inodep);

	device = kgsl_get_minor(minor);
	BUG_ON(device == NULL);

	if (filep->f_flags & O_EXCL) {
		KGSL_DRV_ERR(device, "O_EXCL not allowed\n");
		return -EBUSY;
	}

	result = pm_runtime_get_sync(device->parentdev);
	if (result < 0) {
		KGSL_DRV_ERR(device,
			"Runtime PM: Unable to wake up the device, rc = %d\n",
			result);
		return result;
	}
	result = 0;

	dev_priv = kzalloc(sizeof(struct kgsl_device_private), GFP_KERNEL);
	if (dev_priv == NULL) {
		KGSL_DRV_ERR(device, "kzalloc failed(%zu)\n",
			sizeof(struct kgsl_device_private));
		result = -ENOMEM;
		goto err_pmruntime;
	}

	dev_priv->device = device;
	filep->private_data = dev_priv;

	mutex_lock(&device->mutex);
	kgsl_check_suspended(device);

	if (device->open_count == 0) {
		kgsl_sharedmem_set(&device->memstore, 0, 0,
				device->memstore.size);

		result = device->ftbl->start(device, true);

		if (result) {
			mutex_unlock(&device->mutex);
			goto err_freedevpriv;
		}
		kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
	}
	device->open_count++;
	mutex_unlock(&device->mutex);

	/*
	 * Get file (per process) private struct. This must be done
	 * after the first start so that the global pagetable mappings
	 * are set up before we create the per-process pagetable.
	 */
	dev_priv->process_priv = kgsl_get_process_private(dev_priv);
	if (dev_priv->process_priv == NULL) {
		result = -ENOMEM;
		goto err_stop;
	}

	KGSL_DRV_INFO(device, "Initialized %s: mmu=%s pagetable_count=%d\n",
		device->name, kgsl_mmu_enabled() ? "on" : "off",
		kgsl_pagetable_count);

	return result;

err_stop:
	mutex_lock(&device->mutex);
	device->open_count--;
	if (device->open_count == 0) {
		result = device->ftbl->stop(device);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
	}
	mutex_unlock(&device->mutex);
err_freedevpriv:
	filep->private_data = NULL;
	kfree(dev_priv);
err_pmruntime:
	pm_runtime_put(device->parentdev);
	return result;
}

/* call with private->mem_lock locked */
struct kgsl_mem_entry *
kgsl_sharedmem_find_region(struct kgsl_process_private *private,
	unsigned int gpuaddr, size_t size)
{
	struct rb_node *node = private->mem_rb.rb_node;

	if (!kgsl_mmu_gpuaddr_in_range(gpuaddr))
		return NULL;

	while (node != NULL) {
		struct kgsl_mem_entry *entry;

		entry = rb_entry(node, struct kgsl_mem_entry, node);

		if (kgsl_gpuaddr_in_memdesc(&entry->memdesc, gpuaddr, size))
			return entry;

		if (gpuaddr < entry->memdesc.gpuaddr)
			node = node->rb_left;
		else if (gpuaddr >=
			(entry->memdesc.gpuaddr + entry->memdesc.size))
			node = node->rb_right;
		else
			return NULL;
	}

	return NULL;
}
EXPORT_SYMBOL(kgsl_sharedmem_find_region);

/* call with private->mem_lock locked */
static inline struct kgsl_mem_entry *
kgsl_sharedmem_find(struct kgsl_process_private *private, unsigned int gpuaddr)
{
	return kgsl_sharedmem_find_region(private, gpuaddr, 1);
}

/**
 * kgsl_sharedmem_region_empty - Check if an address region is empty
 *
 * @private: private data for the process to check.
 * @gpuaddr: start address of the region
 * @size: length of the region.
 *
 * Checks that there are no existing allocations within an address
 * region. Note that unlike other kgsl_sharedmem* search functions,
 * this one manages locking on its own.
 */
int
kgsl_sharedmem_region_empty(struct kgsl_process_private *private,
	unsigned int gpuaddr, size_t size)
{
	int result = 1;
	unsigned int gpuaddr_end = gpuaddr + size;

	struct rb_node *node = private->mem_rb.rb_node;

	if (!kgsl_mmu_gpuaddr_in_range(gpuaddr))
		return 0;

	/* don't overflow */
	if (gpuaddr_end < gpuaddr)
		return 0;

	spin_lock(&private->mem_lock);
	node = private->mem_rb.rb_node;
	while (node != NULL) {
		struct kgsl_mem_entry *entry;
		unsigned int memdesc_start, memdesc_end;

		entry = rb_entry(node, struct kgsl_mem_entry, node);

		memdesc_start = entry->memdesc.gpuaddr;
		memdesc_end = memdesc_start
				+ kgsl_memdesc_mmapsize(&entry->memdesc);

		if (gpuaddr_end <= memdesc_start)
			node = node->rb_left;
		else if (memdesc_end <= gpuaddr)
			node = node->rb_right;
		else {
			result = 0;
			break;
		}
	}
	spin_unlock(&private->mem_lock);
	return result;
}

/**
 * kgsl_sharedmem_find_id - find a memory entry by id
 * @process: the owning process
 * @id: id to find
 *
 * @returns - the mem_entry or NULL
 */
static inline struct kgsl_mem_entry *
kgsl_sharedmem_find_id(struct kgsl_process_private *process, unsigned int id)
{
	struct kgsl_mem_entry *entry;

	rcu_read_lock();
	entry = idr_find(&process->mem_idr, id);
	rcu_read_unlock();

	return entry;
}

/* call all ioctl sub functions with driver locked */
static long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv,
	unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_device_getproperty *param = data;

	switch (param->type) {
	case KGSL_PROP_VERSION:
	{
		struct kgsl_version version;
		if (param->sizebytes != sizeof(version)) {
			result = -EINVAL;
			break;
		}

		version.drv_major = KGSL_VERSION_MAJOR;
		version.drv_minor = KGSL_VERSION_MINOR;
		version.dev_major = dev_priv->device->ver_major;
		version.dev_minor = dev_priv->device->ver_minor;

		if (copy_to_user(param->value, &version, sizeof(version)))
			result = -EFAULT;

		break;
	}
	case KGSL_PROP_GPU_RESET_STAT:
	{
		/* Return reset status of given context and clear it */
		uint32_t id;
		struct kgsl_context *context;

		if (param->sizebytes != sizeof(unsigned int)) {
			result = -EINVAL;
			break;
		}
		/* We expect the value passed in to contain the context id */
		if (copy_from_user(&id, param->value,
			sizeof(unsigned int))) {
			result = -EFAULT;
			break;
		}
		context = kgsl_find_context(dev_priv, id);
		if (!context) {
			result = -EINVAL;
			break;
		}
		/*
		 * Copy the reset status to value which also serves as
		 * the out parameter
		 */
		if (copy_to_user(param->value, &(context->reset_status),
			sizeof(unsigned int))) {
			result = -EFAULT;
			break;
		}
		/* Clear reset status once it's been queried */
		context->reset_status = KGSL_CTX_STAT_NO_ERROR;
		break;
	}
	default:
		result = dev_priv->device->ftbl->getproperty(
			dev_priv->device, param->type,
			param->value, param->sizebytes);
	}

	return result;
}

static long kgsl_ioctl_device_setproperty(struct kgsl_device_private *dev_priv,
	unsigned int cmd, void *data)
{
	int result = 0;
	/* The getproperty struct is reused for setproperty too */
	struct kgsl_device_getproperty *param = data;

	if (dev_priv->device->ftbl->setproperty)
		result = dev_priv->device->ftbl->setproperty(
			dev_priv->device, param->type,
			param->value, param->sizebytes);

	return result;
}

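/*
 * Wait for a timestamp to be retired on the given context (or the
 * global timestamp if context is NULL). The active count is held
 * across the wait so a suspend cannot race with it.
 */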
static long _device_waittimestamp(struct kgsl_device_private *dev_priv,
	struct kgsl_context *context,
	unsigned int timestamp,
	unsigned int timeout)
{
	int result = 0;
	struct kgsl_device *device = dev_priv->device;
	unsigned int context_id = context ? context->id : KGSL_MEMSTORE_GLOBAL;

	/* Set the active count so that suspend doesn't do the wrong thing */
	device->active_cnt++;

	trace_kgsl_waittimestamp_entry(device, context_id,
			kgsl_readtimestamp(device, context,
					KGSL_TIMESTAMP_RETIRED),
			timestamp, timeout);

	result = device->ftbl->waittimestamp(dev_priv->device,
			context, timestamp, timeout);

	trace_kgsl_waittimestamp_exit(device,
			kgsl_readtimestamp(device, context,
					KGSL_TIMESTAMP_RETIRED),
			result);

	/* Fire off any pending suspend operations that are in flight */
	kgsl_active_count_put(dev_priv->device);

	return result;
}

static long kgsl_ioctl_device_waittimestamp(struct kgsl_device_private
					*dev_priv, unsigned int cmd,
					void *data)
{
	struct kgsl_device_waittimestamp *param = data;

	return _device_waittimestamp(dev_priv, NULL,
			param->timestamp, param->timeout);
}

static long kgsl_ioctl_device_waittimestamp_ctxtid(struct kgsl_device_private
					*dev_priv, unsigned int cmd,
					void *data)
{
	struct kgsl_device_waittimestamp_ctxtid *param = data;
	struct kgsl_context *context;
	int result;

	context = kgsl_find_context(dev_priv, param->context_id);
	if (context == NULL)
		return -EINVAL;
	/*
	 * A reference count is needed here, because waittimestamp may
	 * block with the device mutex unlocked and userspace could
	 * request for the context to be destroyed during that time.
	 */
	kgsl_context_get(context);
	result = _device_waittimestamp(dev_priv, context,
			param->timestamp, param->timeout);
	kgsl_context_put(context);
	return result;
}

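/*
 * Submit one or more indirect buffers (IBs) on a draw context and
 * return the timestamp the submission was queued on.
 */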
static long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
	unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_ringbuffer_issueibcmds *param = data;
	struct kgsl_ibdesc *ibdesc;
	struct kgsl_context *context;

	context = kgsl_find_context(dev_priv, param->drawctxt_id);
	if (context == NULL) {
		result = -EINVAL;
		goto done;
	}

	if (param->flags & KGSL_CONTEXT_SUBMIT_IB_LIST) {
		KGSL_DRV_INFO(dev_priv->device,
			"Using IB list mode for ib submission, numibs: %d\n",
			param->numibs);
		if (!param->numibs) {
			KGSL_DRV_ERR(dev_priv->device,
				"Invalid numibs as parameter: %d\n",
				param->numibs);
			result = -EINVAL;
			goto done;
		}

		/*
		 * Put a reasonable upper limit on the number of IBs that can be
		 * submitted
		 */
		if (param->numibs > 10000) {
			KGSL_DRV_ERR(dev_priv->device,
				"Too many IBs submitted. count: %d max 10000\n",
				param->numibs);
			result = -EINVAL;
			goto done;
		}

		ibdesc = kzalloc(sizeof(struct kgsl_ibdesc) * param->numibs,
				GFP_KERNEL);
		if (!ibdesc) {
			KGSL_MEM_ERR(dev_priv->device,
				"kzalloc(%zu) failed\n",
				sizeof(struct kgsl_ibdesc) * param->numibs);
			result = -ENOMEM;
			goto done;
		}

		if (copy_from_user(ibdesc, (void *)param->ibdesc_addr,
				sizeof(struct kgsl_ibdesc) * param->numibs)) {
			result = -EFAULT;
			KGSL_DRV_ERR(dev_priv->device,
				"copy_from_user failed\n");
			goto free_ibdesc;
		}
	} else {
		KGSL_DRV_INFO(dev_priv->device,
			"Using single IB submission mode for ib submission\n");
		/* If user space driver is still using the old mode of
		 * submitting a single ib then we need to support that as well */
		ibdesc = kzalloc(sizeof(struct kgsl_ibdesc), GFP_KERNEL);
		if (!ibdesc) {
			KGSL_MEM_ERR(dev_priv->device,
				"kzalloc(%zu) failed\n",
				sizeof(struct kgsl_ibdesc));
			result = -ENOMEM;
			goto done;
		}
		ibdesc[0].gpuaddr = param->ibdesc_addr;
		ibdesc[0].sizedwords = param->numibs;
		param->numibs = 1;
	}

	result = dev_priv->device->ftbl->issueibcmds(dev_priv,
			context,
			ibdesc,
			param->numibs,
			&param->timestamp,
			param->flags);

	trace_kgsl_issueibcmds(dev_priv->device, param, ibdesc, result);

free_ibdesc:
	kfree(ibdesc);
done:
	return result;
}

static long _cmdstream_readtimestamp(struct kgsl_device_private *dev_priv,
	struct kgsl_context *context, unsigned int type,
	unsigned int *timestamp)
{
	*timestamp = kgsl_readtimestamp(dev_priv->device, context, type);

	trace_kgsl_readtimestamp(dev_priv->device,
			context ? context->id : KGSL_MEMSTORE_GLOBAL,
			type, *timestamp);

	return 0;
}

static long kgsl_ioctl_cmdstream_readtimestamp(struct kgsl_device_private
					*dev_priv, unsigned int cmd,
					void *data)
{
	struct kgsl_cmdstream_readtimestamp *param = data;

	return _cmdstream_readtimestamp(dev_priv, NULL,
			param->type, &param->timestamp);
}

static long kgsl_ioctl_cmdstream_readtimestamp_ctxtid(struct kgsl_device_private
					*dev_priv, unsigned int cmd,
					void *data)
{
	struct kgsl_cmdstream_readtimestamp_ctxtid *param = data;
	struct kgsl_context *context;

	context = kgsl_find_context(dev_priv, param->context_id);
	if (context == NULL)
		return -EINVAL;

	return _cmdstream_readtimestamp(dev_priv, context,
			param->type, &param->timestamp);
}

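/*
 * Timestamp event callback: the GPU has retired the timestamp this
 * entry was queued on, so it is now safe to detach and free it.
 */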
static void kgsl_freemem_event_cb(struct kgsl_device *device,
	void *priv, u32 id, u32 timestamp)
{
	struct kgsl_mem_entry *entry = priv;
	trace_kgsl_mem_timestamp_free(device, entry, id, timestamp, 0);
	kgsl_mem_entry_detach_process(entry);
}

static long _cmdstream_freememontimestamp(struct kgsl_device_private *dev_priv,
	unsigned int gpuaddr, struct kgsl_context *context,
	unsigned int timestamp, unsigned int type)
{
	int result = 0;
	struct kgsl_mem_entry *entry = NULL;
	struct kgsl_device *device = dev_priv->device;
	unsigned int context_id = context ? context->id : KGSL_MEMSTORE_GLOBAL;

	spin_lock(&dev_priv->process_priv->mem_lock);
	entry = kgsl_sharedmem_find(dev_priv->process_priv, gpuaddr);
	spin_unlock(&dev_priv->process_priv->mem_lock);

	if (!entry) {
		KGSL_DRV_ERR(dev_priv->device,
			"invalid gpuaddr %08x\n", gpuaddr);
		result = -EINVAL;
		goto done;
	}
	trace_kgsl_mem_timestamp_queue(device, entry, context_id,
			kgsl_readtimestamp(device, context,
					KGSL_TIMESTAMP_RETIRED),
			timestamp);
	result = kgsl_add_event(dev_priv->device, context_id, timestamp,
			kgsl_freemem_event_cb, entry, dev_priv);
done:
	return result;
}

static long kgsl_ioctl_cmdstream_freememontimestamp(struct kgsl_device_private
					*dev_priv, unsigned int cmd,
					void *data)
{
	struct kgsl_cmdstream_freememontimestamp *param = data;

	return _cmdstream_freememontimestamp(dev_priv, param->gpuaddr,
			NULL, param->timestamp, param->type);
}

static long kgsl_ioctl_cmdstream_freememontimestamp_ctxtid(
					struct kgsl_device_private
					*dev_priv, unsigned int cmd,
					void *data)
{
	struct kgsl_cmdstream_freememontimestamp_ctxtid *param = data;
	struct kgsl_context *context;

	context = kgsl_find_context(dev_priv, param->context_id);
	if (context == NULL) {
		KGSL_DRV_ERR(dev_priv->device,
			"invalid drawctxt context_id %d\n", param->context_id);
		return -EINVAL;
	}

	return _cmdstream_freememontimestamp(dev_priv, param->gpuaddr,
			context, param->timestamp, param->type);
}

static long kgsl_ioctl_drawctxt_create(struct kgsl_device_private *dev_priv,
	unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_drawctxt_create *param = data;
	struct kgsl_context *context = NULL;

	context = kgsl_create_context(dev_priv);

	if (context == NULL) {
		result = -ENOMEM;
		goto done;
	}

	if (dev_priv->device->ftbl->drawctxt_create) {
		result = dev_priv->device->ftbl->drawctxt_create(
			dev_priv->device, dev_priv->process_priv->pagetable,
			context, param->flags);
		if (result)
			goto done;
	}
	trace_kgsl_context_create(dev_priv->device, context, param->flags);
	param->drawctxt_id = context->id;
done:
	if (result && context)
		kgsl_context_detach(context);

	return result;
}

static long kgsl_ioctl_drawctxt_destroy(struct kgsl_device_private *dev_priv,
	unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_drawctxt_destroy *param = data;
	struct kgsl_context *context;

	context = kgsl_find_context(dev_priv, param->drawctxt_id);

	if (context == NULL) {
		result = -EINVAL;
		goto done;
	}

	kgsl_context_detach(context);
done:
	return result;
}

static long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
	unsigned int cmd, void *data)
{
	struct kgsl_sharedmem_free *param = data;
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_mem_entry *entry = NULL;

	spin_lock(&private->mem_lock);
	entry = kgsl_sharedmem_find(private, param->gpuaddr);
	spin_unlock(&private->mem_lock);

	if (!entry) {
		KGSL_MEM_INFO(dev_priv->device, "invalid gpuaddr %08x\n",
			param->gpuaddr);
		return -EINVAL;
	}
	trace_kgsl_mem_free(entry);

	kgsl_mem_entry_detach_process(entry);
	return 0;
}

static long kgsl_ioctl_gpumem_free_id(struct kgsl_device_private *dev_priv,
	unsigned int cmd, void *data)
{
	struct kgsl_gpumem_free_id *param = data;
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_mem_entry *entry = NULL;

	entry = kgsl_sharedmem_find_id(private, param->id);

	if (!entry) {
		KGSL_MEM_INFO(dev_priv->device, "invalid id %d\n", param->id);
		return -EINVAL;
	}
	trace_kgsl_mem_free(entry);

	kgsl_mem_entry_detach_process(entry);
	return 0;
}

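/* Look up the VMA for a userspace address, logging a failed lookup */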
static struct vm_area_struct *kgsl_get_vma_from_start_addr(unsigned int addr)
{
	struct vm_area_struct *vma;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	up_read(&current->mm->mmap_sem);
	if (!vma)
		KGSL_CORE_ERR("find_vma(%x) failed\n", addr);

	return vma;
}

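/* Returns nonzero if [start, start + size) extends beyond len */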
static inline int _check_region(unsigned long start, unsigned long size,
	uint64_t len)
{
	uint64_t end = ((uint64_t) start) + size;
	return (end > len);
}

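/*
 * Resolve an fd to a physically contiguous region: try PMEM first,
 * then fall back to treating the fd as a framebuffer device.
 */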
static int kgsl_get_phys_file(int fd, unsigned long *start, unsigned long *len,
	unsigned long *vstart, struct file **filep)
{
	struct file *fbfile;
	int ret = 0;
	dev_t rdev;
	struct fb_info *info;

	*filep = NULL;
#ifdef CONFIG_ANDROID_PMEM
	if (!get_pmem_file(fd, start, vstart, len, filep))
		return 0;
#endif

	fbfile = fget(fd);
	if (fbfile == NULL) {
		KGSL_CORE_ERR("fget_light failed\n");
		return -1;
	}

	rdev = fbfile->f_dentry->d_inode->i_rdev;
	info = MAJOR(rdev) == FB_MAJOR ? registered_fb[MINOR(rdev)] : NULL;
	if (info) {
		*start = info->fix.smem_start;
		*len = info->fix.smem_len;
		*vstart = (unsigned long)__va(info->fix.smem_start);
		ret = 0;
	} else {
		KGSL_CORE_ERR("framebuffer minor %d not found\n",
			MINOR(rdev));
		ret = -1;
	}

	fput(fbfile);

	return ret;
}

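/*
 * Attach a PMEM or framebuffer region described by (fd, offset, size)
 * to a mem_entry, validating alignment and bounds along the way.
 */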
static int kgsl_setup_phys_file(struct kgsl_mem_entry *entry,
	struct kgsl_pagetable *pagetable,
	unsigned int fd, unsigned int offset,
	size_t size)
{
	int ret;
	unsigned long phys, virt, len;
	struct file *filep;

	ret = kgsl_get_phys_file(fd, &phys, &len, &virt, &filep);
	if (ret)
		return ret;

	ret = -ERANGE;

	if (phys == 0) {
		KGSL_CORE_ERR("kgsl_get_phys_file returned phys=0\n");
		goto err;
	}

	/* Make sure the length of the region, the offset and the desired
	 * size are all page aligned or bail
	 */
	if ((len & ~PAGE_MASK) ||
		(offset & ~PAGE_MASK) ||
		(size & ~PAGE_MASK)) {
		KGSL_CORE_ERR("length %lu, offset %u or size %u "
			"is not page aligned\n",
			len, offset, size);
		goto err;
	}

	/* The size or offset can never be greater than the PMEM length */
	if (offset >= len || size > len) {
		KGSL_CORE_ERR("offset %u or size %u "
			"exceeds pmem length %lu\n",
			offset, size, len);
		goto err;
	}

	/* If size is 0, then adjust it to default to the size of the region
	 * minus the offset. If size isn't zero, then make sure that it will
	 * fit inside of the region.
	 */
	if (size == 0)
		size = len - offset;

	else if (_check_region(offset, size, len))
		goto err;

	entry->priv_data = filep;

	entry->memdesc.pagetable = pagetable;
	entry->memdesc.size = size;
	entry->memdesc.physaddr = phys + offset;
	entry->memdesc.hostptr = (void *) (virt + offset);
	/* USE_CPU_MAP is not implemented for PMEM. */
	entry->memdesc.flags &= ~KGSL_MEMFLAGS_USE_CPU_MAP;

	ret = memdesc_sg_phys(&entry->memdesc, phys + offset, size);
	if (ret)
		goto err;

	return 0;
err:
#ifdef CONFIG_ANDROID_PMEM
	put_pmem_file(filep);
#endif
	return ret;
}

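/*
 * Build a scatterlist for a user virtual address range by walking the
 * current process's page tables one page at a time.
 */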
Jordan Croused17e9aa2011-10-12 16:57:48 -06001493static int memdesc_sg_virt(struct kgsl_memdesc *memdesc,
Jeremy Gebbend1f8c902013-05-28 16:53:45 -06001494 unsigned long paddr, int size)
Jordan Croused17e9aa2011-10-12 16:57:48 -06001495{
1496 int i;
1497 int sglen = PAGE_ALIGN(size) / PAGE_SIZE;
Jordan Croused17e9aa2011-10-12 16:57:48 -06001498
Jordan Crousea652a072012-04-06 16:26:33 -06001499 memdesc->sg = kgsl_sg_alloc(sglen);
1500
Jordan Croused17e9aa2011-10-12 16:57:48 -06001501 if (memdesc->sg == NULL)
1502 return -ENOMEM;
1503
1504 memdesc->sglen = sglen;
Rajeev Kulkarni8dfdc3362012-11-22 00:22:32 -08001505 memdesc->sglen_alloc = sglen;
1506
Jordan Croused17e9aa2011-10-12 16:57:48 -06001507 sg_init_table(memdesc->sg, sglen);
1508
1509 spin_lock(&current->mm->page_table_lock);
1510
1511 for (i = 0; i < sglen; i++, paddr += PAGE_SIZE) {
1512 struct page *page;
1513 pmd_t *ppmd;
1514 pte_t *ppte;
1515 pgd_t *ppgd = pgd_offset(current->mm, paddr);
1516
1517 if (pgd_none(*ppgd) || pgd_bad(*ppgd))
1518 goto err;
1519
Steve Mucklef132c6c2012-06-06 18:30:57 -07001520 ppmd = pmd_offset(pud_offset(ppgd, paddr), paddr);
Jordan Croused17e9aa2011-10-12 16:57:48 -06001521 if (pmd_none(*ppmd) || pmd_bad(*ppmd))
1522 goto err;
1523
1524 ppte = pte_offset_map(ppmd, paddr);
1525 if (ppte == NULL)
1526 goto err;
1527
1528 page = pfn_to_page(pte_pfn(*ppte));
1529 if (!page)
1530 goto err;
1531
1532 sg_set_page(&memdesc->sg[i], page, PAGE_SIZE, 0);
1533 pte_unmap(ppte);
1534 }
1535
1536 spin_unlock(&current->mm->page_table_lock);
1537
1538 return 0;
1539
1540err:
1541 spin_unlock(&current->mm->page_table_lock);
Jordan Crousea652a072012-04-06 16:26:33 -06001542 kgsl_sg_free(memdesc->sg, sglen);
Jordan Croused17e9aa2011-10-12 16:57:48 -06001543 memdesc->sg = NULL;
1544
1545 return -EINVAL;
1546}
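
/*
 * Aside on memdesc_sg_virt() above: it builds the scatterlist by walking
 * the current process's page tables directly, without taking references
 * on the pages. A rough sketch of the equivalent result using
 * get_user_pages() -- shown only to clarify what the walk computes, not
 * what this driver does, since the walk deliberately avoids pinning:
 *
 *	struct page **pages = kmalloc(sglen * sizeof(*pages), GFP_KERNEL);
 *	int i, n = get_user_pages(current, current->mm, paddr, sglen,
 *				  1, 0, pages, NULL);
 *	for (i = 0; i < n; i++)
 *		sg_set_page(&memdesc->sg[i], pages[i], PAGE_SIZE, 0);
 *
 * (get_user_pages() would also need mmap_sem held and page releases on
 * teardown; treat this as an illustrative assumption, not a drop-in.)
 */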
1547
Jeremy Gebbend1f8c902013-05-28 16:53:45 -06001548static int kgsl_setup_useraddr(struct kgsl_mem_entry *entry,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001549 struct kgsl_pagetable *pagetable,
Jeremy Gebbend1f8c902013-05-28 16:53:45 -06001550 unsigned long useraddr, unsigned int offset,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001551 size_t size)
1552{
1553 struct vm_area_struct *vma;
1554 unsigned int len;
1555
1556 down_read(&current->mm->mmap_sem);
Jeremy Gebbend1f8c902013-05-28 16:53:45 -06001557 vma = find_vma(current->mm, useraddr);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001558 up_read(&current->mm->mmap_sem);
1559
1560 if (!vma) {
Jeremy Gebbend1f8c902013-05-28 16:53:45 -06001561 KGSL_CORE_ERR("find_vma(%lx) failed\n", useraddr);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001562 return -EINVAL;
1563 }
1564
1565 /* We don't necessarily start at vma->vm_start */
Jeremy Gebbend1f8c902013-05-28 16:53:45 -06001566 len = vma->vm_end - useraddr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001567
1568 if (offset >= len)
1569 return -EINVAL;
1570
Jeremy Gebbend1f8c902013-05-28 16:53:45 -06001571 if (!KGSL_IS_PAGE_ALIGNED(useraddr) ||
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001572 !KGSL_IS_PAGE_ALIGNED(len)) {
Jeremy Gebbend1f8c902013-05-28 16:53:45 -06001573 KGSL_CORE_ERR("bad alignment: start(%lx) len(%u)\n",
1574 useraddr, len);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001575 return -EINVAL;
1576 }
1577
1578 if (size == 0)
1579 size = len;
1580
1581 /* Adjust the size of the region to account for the offset */
1582 size += offset & ~PAGE_MASK;
1583
1584 size = ALIGN(size, PAGE_SIZE);
1585
1586 if (_check_region(offset & PAGE_MASK, size, len)) {
1587 KGSL_CORE_ERR("Offset (%ld) + size (%d) is larger"
1588 "than region length %d\n",
1589 offset & PAGE_MASK, size, len);
1590 return -EINVAL;
1591 }
1592
1593 entry->memdesc.pagetable = pagetable;
1594 entry->memdesc.size = size;
Jeremy Gebbend1f8c902013-05-28 16:53:45 -06001595 entry->memdesc.useraddr = useraddr + (offset & PAGE_MASK);
Jeremy Gebbenfec05c22013-05-28 16:59:29 -06001596 if (kgsl_memdesc_use_cpu_map(&entry->memdesc))
1597 entry->memdesc.gpuaddr = entry->memdesc.useraddr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001598
Jeremy Gebbend1f8c902013-05-28 16:53:45 -06001599 return memdesc_sg_virt(&entry->memdesc, entry->memdesc.useraddr,
1600 size);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001601}
1602
1603#ifdef CONFIG_ASHMEM
1604static int kgsl_setup_ashmem(struct kgsl_mem_entry *entry,
1605 struct kgsl_pagetable *pagetable,
Jeremy Gebbend1f8c902013-05-28 16:53:45 -06001606 int fd, unsigned long useraddr, size_t size)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001607{
1608 int ret;
1609 struct vm_area_struct *vma;
1610 struct file *filep, *vmfile;
1611 unsigned long len;
1612
Jeremy Gebbend1f8c902013-05-28 16:53:45 -06001613 vma = kgsl_get_vma_from_start_addr(useraddr);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001614 if (vma == NULL)
1615 return -EINVAL;
1616
Jeremy Gebbend1f8c902013-05-28 16:53:45 -06001617 if (vma->vm_pgoff || vma->vm_start != useraddr) {
Jordan Crouse2c542b62011-07-26 08:30:20 -06001618 KGSL_CORE_ERR("Invalid vma region\n");
1619 return -EINVAL;
1620 }
1621
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001622 len = vma->vm_end - vma->vm_start;
1623
1624 if (size == 0)
1625 size = len;
1626
1627 if (size != len) {
Jeremy Gebbend1f8c902013-05-28 16:53:45 -06001628 KGSL_CORE_ERR("Invalid size %d for vma region %lx\n",
1629 size, useraddr);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001630 return -EINVAL;
1631 }
1632
1633 ret = get_ashmem_file(fd, &filep, &vmfile, &len);
1634
1635 if (ret) {
1636 KGSL_CORE_ERR("get_ashmem_file failed\n");
1637 return ret;
1638 }
1639
1640 if (vmfile != vma->vm_file) {
1641 KGSL_CORE_ERR("ashmem shmem file does not match vma\n");
1642 ret = -EINVAL;
1643 goto err;
1644 }
1645
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001646 entry->priv_data = filep;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001647 entry->memdesc.pagetable = pagetable;
1648 entry->memdesc.size = ALIGN(size, PAGE_SIZE);
Jeremy Gebbend1f8c902013-05-28 16:53:45 -06001649 entry->memdesc.useraddr = useraddr;
Jeremy Gebbenfec05c22013-05-28 16:59:29 -06001650 if (kgsl_memdesc_use_cpu_map(&entry->memdesc))
1651 entry->memdesc.gpuaddr = entry->memdesc.useraddr;
Jordan Croused17e9aa2011-10-12 16:57:48 -06001652
Jeremy Gebbend1f8c902013-05-28 16:53:45 -06001653 ret = memdesc_sg_virt(&entry->memdesc, useraddr, size);
Jordan Croused17e9aa2011-10-12 16:57:48 -06001654 if (ret)
1655 goto err;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001656
1657 return 0;
1658
1659err:
1660 put_ashmem_file(filep);
1661 return ret;
1662}
1663#else
1664static int kgsl_setup_ashmem(struct kgsl_mem_entry *entry,
1665 struct kgsl_pagetable *pagetable,
Jeremy Gebbend1f8c902013-05-28 16:53:45 -06001666 int fd, unsigned long useraddr, size_t size)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001667{
1668 return -EINVAL;
1669}
1670#endif
1671
Jordan Crouse8eab35a2011-10-12 16:57:48 -06001672static int kgsl_setup_ion(struct kgsl_mem_entry *entry,
1673 struct kgsl_pagetable *pagetable, int fd)
1674{
1675 struct ion_handle *handle;
1676 struct scatterlist *s;
Laura Abbottb14ed962012-01-30 14:18:08 -08001677 struct sg_table *sg_table;
Jordan Crouse8eab35a2011-10-12 16:57:48 -06001678
Harsh Vardhan Dwivedif48af7f2012-04-13 12:50:44 -06001679 if (IS_ERR_OR_NULL(kgsl_ion_client))
1680 return -ENODEV;
Jordan Crouse8eab35a2011-10-12 16:57:48 -06001681
Laura Abbottb14ed962012-01-30 14:18:08 -08001682 handle = ion_import_dma_buf(kgsl_ion_client, fd);
Ranjhith Kalisamy0d2e14f2012-08-14 19:49:39 +05301683 if (IS_ERR(handle))
Jordan Crouse8eab35a2011-10-12 16:57:48 -06001684 return PTR_ERR(handle);
Ranjhith Kalisamy0d2e14f2012-08-14 19:49:39 +05301685 else if (!handle)
1686 return -EINVAL;
Jordan Crouse8eab35a2011-10-12 16:57:48 -06001687
1688 entry->memtype = KGSL_MEM_ENTRY_ION;
1689 entry->priv_data = handle;
1690 entry->memdesc.pagetable = pagetable;
1691 entry->memdesc.size = 0;
Jeremy Gebbenfec05c22013-05-28 16:59:29 -06001692	/* USE_CPU_MAP is not implemented for ION. */
1693 entry->memdesc.flags &= ~KGSL_MEMFLAGS_USE_CPU_MAP;
Jordan Crouse8eab35a2011-10-12 16:57:48 -06001694
Laura Abbottb14ed962012-01-30 14:18:08 -08001695 sg_table = ion_sg_table(kgsl_ion_client, handle);
1696
1697 if (IS_ERR_OR_NULL(sg_table))
Jordan Crouse8eab35a2011-10-12 16:57:48 -06001698 goto err;
1699
Laura Abbottb14ed962012-01-30 14:18:08 -08001700 entry->memdesc.sg = sg_table->sgl;
Jordan Crouse8eab35a2011-10-12 16:57:48 -06001701
1702 /* Calculate the size of the memdesc from the sglist */
1703
1704 entry->memdesc.sglen = 0;
1705
1706 for (s = entry->memdesc.sg; s != NULL; s = sg_next(s)) {
1707 entry->memdesc.size += s->length;
1708 entry->memdesc.sglen++;
1709 }
1710
1711 return 0;
1712err:
1713 ion_free(kgsl_ion_client, handle);
1714 return -ENOMEM;
1715}
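
/*
 * Hedged userspace sketch of reaching kgsl_setup_ion() above: the fd is
 * a dma-buf fd exported from ION (e.g. via ION_IOC_SHARE); kgsl_fd and
 * ion_share_fd are illustrative assumptions.
 *
 *	struct kgsl_map_user_mem req = { 0 };
 *	req.memtype = KGSL_USER_MEM_TYPE_ION;
 *	req.fd = ion_share_fd;
 *	ioctl(kgsl_fd, IOCTL_KGSL_MAP_USER_MEM, &req);
 *	// on success req.gpuaddr holds the GPU mapping of the ION buffer
 */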
1716
Jeremy Gebbenfec05c22013-05-28 16:59:29 -06001717static inline int
1718can_use_cpu_map(void)
1719{
1720 return (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU
1721 && kgsl_mmu_is_perprocess());
1722}
1723
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001724static long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
1725 unsigned int cmd, void *data)
1726{
1727 int result = -EINVAL;
1728 struct kgsl_map_user_mem *param = data;
1729 struct kgsl_mem_entry *entry = NULL;
1730 struct kgsl_process_private *private = dev_priv->process_priv;
Jason848741a2011-07-12 10:24:25 -07001731 enum kgsl_user_mem_type memtype;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001732
1733 entry = kgsl_mem_entry_create();
1734
1735 if (entry == NULL)
1736 return -ENOMEM;
1737
Jason848741a2011-07-12 10:24:25 -07001738 if (_IOC_SIZE(cmd) == sizeof(struct kgsl_sharedmem_from_pmem))
1739 memtype = KGSL_USER_MEM_TYPE_PMEM;
1740 else
1741 memtype = param->memtype;
1742
Jeremy Gebbena46f4272013-05-28 16:54:09 -06001743 /*
1744 * Mask off unknown flags from userspace. This way the caller can
1745 * check if a flag is supported by looking at the returned flags.
Jordan Crousee9efb0b2013-05-28 16:54:19 -06001746 * Note: CACHEMODE is ignored for this call. Caching should be
1747 * determined by type of allocation being mapped.
Jeremy Gebbena46f4272013-05-28 16:54:09 -06001748 */
1749 param->flags &= KGSL_MEMFLAGS_GPUREADONLY
1750 | KGSL_MEMTYPE_MASK
Jeremy Gebbenfec05c22013-05-28 16:59:29 -06001751 | KGSL_MEMALIGN_MASK
1752 | KGSL_MEMFLAGS_USE_CPU_MAP;
Jeremy Gebbena46f4272013-05-28 16:54:09 -06001753
Jordan Crousedc67dfb2012-10-25 09:41:46 -06001754 entry->memdesc.flags = param->flags;
Jeremy Gebbenfec05c22013-05-28 16:59:29 -06001755 if (!can_use_cpu_map())
1756 entry->memdesc.flags &= ~KGSL_MEMFLAGS_USE_CPU_MAP;
Jordan Crousedc67dfb2012-10-25 09:41:46 -06001757
Jason848741a2011-07-12 10:24:25 -07001758 switch (memtype) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001759 case KGSL_USER_MEM_TYPE_PMEM:
1760 if (param->fd == 0 || param->len == 0)
1761 break;
1762
1763 result = kgsl_setup_phys_file(entry, private->pagetable,
1764 param->fd, param->offset,
1765 param->len);
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001766 entry->memtype = KGSL_MEM_ENTRY_PMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001767 break;
1768
1769 case KGSL_USER_MEM_TYPE_ADDR:
Harsh Vardhan Dwivedia9eb7cb2012-03-26 15:21:38 -06001770 KGSL_DEV_ERR_ONCE(dev_priv->device, "User mem type "
1771 "KGSL_USER_MEM_TYPE_ADDR is deprecated\n");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001772 if (!kgsl_mmu_enabled()) {
1773 KGSL_DRV_ERR(dev_priv->device,
1774 "Cannot map paged memory with the "
1775 "MMU disabled\n");
1776 break;
1777 }
1778
1779 if (param->hostptr == 0)
1780 break;
1781
Jeremy Gebbend1f8c902013-05-28 16:53:45 -06001782 result = kgsl_setup_useraddr(entry, private->pagetable,
1783 param->hostptr,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001784 param->offset, param->len);
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001785 entry->memtype = KGSL_MEM_ENTRY_USER;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001786 break;
1787
1788 case KGSL_USER_MEM_TYPE_ASHMEM:
1789 if (!kgsl_mmu_enabled()) {
1790 KGSL_DRV_ERR(dev_priv->device,
1791 "Cannot map paged memory with the "
1792 "MMU disabled\n");
1793 break;
1794 }
1795
1796 if (param->hostptr == 0)
1797 break;
1798
1799 result = kgsl_setup_ashmem(entry, private->pagetable,
Jeremy Gebbend1f8c902013-05-28 16:53:45 -06001800 param->fd, param->hostptr,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001801 param->len);
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001802
1803 entry->memtype = KGSL_MEM_ENTRY_ASHMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001804 break;
Jordan Crouse8eab35a2011-10-12 16:57:48 -06001805 case KGSL_USER_MEM_TYPE_ION:
1806 result = kgsl_setup_ion(entry, private->pagetable,
1807 param->fd);
1808 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001809 default:
Jason848741a2011-07-12 10:24:25 -07001810 KGSL_CORE_ERR("Invalid memory type: %x\n", memtype);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001811 break;
1812 }
1813
1814 if (result)
1815 goto error;
1816
Rajeev Kulkarni8dfdc3362012-11-22 00:22:32 -08001817 if (entry->memdesc.size >= SZ_1M)
Jordan Crousedc67dfb2012-10-25 09:41:46 -06001818 kgsl_memdesc_set_align(&entry->memdesc, ilog2(SZ_1M));
Rajeev Kulkarni8dfdc3362012-11-22 00:22:32 -08001819 else if (entry->memdesc.size >= SZ_64K)
Jordan Crousedc67dfb2012-10-25 09:41:46 -06001820		kgsl_memdesc_set_align(&entry->memdesc, ilog2(SZ_64K));
Rajeev Kulkarni8dfdc3362012-11-22 00:22:32 -08001821
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001822 result = kgsl_mmu_map(private->pagetable,
1823 &entry->memdesc,
1824 GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
1825
1826 if (result)
1827 goto error_put_file_ptr;
1828
 1829	/* Adjust the returned value for a non-4k-aligned offset */
1830 param->gpuaddr = entry->memdesc.gpuaddr + (param->offset & ~PAGE_MASK);
Jeremy Gebbena46f4272013-05-28 16:54:09 -06001831 /* echo back flags */
1832 param->flags = entry->memdesc.flags;
1833
1834 result = kgsl_mem_entry_attach_process(entry, private);
1835 if (result)
1836 goto error_unmap;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001837
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001838 KGSL_STATS_ADD(param->len, kgsl_driver.stats.mapped,
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001839 kgsl_driver.stats.mapped_max);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001840
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001841 kgsl_process_add_stats(private, entry->memtype, param->len);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001842
Jeremy Gebbena5859272012-03-01 12:46:28 -07001843 trace_kgsl_mem_map(entry, param->fd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001844
1845 kgsl_check_idle(dev_priv->device);
1846 return result;
1847
Jeremy Gebbena46f4272013-05-28 16:54:09 -06001848error_unmap:
1849 kgsl_mmu_unmap(private->pagetable, &entry->memdesc);
Jeremy Gebben53d4dd02012-05-07 15:42:00 -06001850error_put_file_ptr:
1851 switch (entry->memtype) {
1852 case KGSL_MEM_ENTRY_PMEM:
1853 case KGSL_MEM_ENTRY_ASHMEM:
1854 if (entry->priv_data)
1855 fput(entry->priv_data);
1856 break;
1857 case KGSL_MEM_ENTRY_ION:
Jeremy Gebben53d4dd02012-05-07 15:42:00 -06001858 ion_free(kgsl_ion_client, entry->priv_data);
1859 break;
1860 default:
1861 break;
1862 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001863error:
1864 kfree(entry);
1865 kgsl_check_idle(dev_priv->device);
1866 return result;
1867}
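
/*
 * Because kgsl_ioctl_map_user_mem() masks off unknown flags and echoes
 * the surviving set back, userspace can probe for a feature without a
 * version check. A hedged sketch (buf, buf_len and the control flow are
 * illustrative assumptions):
 *
 *	struct kgsl_map_user_mem req = { 0 };
 *	req.memtype = KGSL_USER_MEM_TYPE_ADDR;
 *	req.hostptr = (unsigned long) buf;	// assumed page aligned
 *	req.len = buf_len;			// assumed page sized
 *	req.flags = KGSL_MEMFLAGS_USE_CPU_MAP;
 *	ioctl(kgsl_fd, IOCTL_KGSL_MAP_USER_MEM, &req);
 *	if (!(req.flags & KGSL_MEMFLAGS_USE_CPU_MAP))
 *		;	// kernel cleared it: use req.gpuaddr as returned
 */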
1868
Jordan Crousee9efb0b2013-05-28 16:54:19 -06001869static int _kgsl_gpumem_sync_cache(struct kgsl_mem_entry *entry, int op)
1870{
1871 int ret = 0;
1872 int cacheop;
1873 int mode;
1874
1875 /*
1876 * Flush is defined as (clean | invalidate). If both bits are set, then
1877 * do a flush, otherwise check for the individual bits and clean or inv
1878 * as requested
1879 */
1880
1881 if ((op & KGSL_GPUMEM_CACHE_FLUSH) == KGSL_GPUMEM_CACHE_FLUSH)
1882 cacheop = KGSL_CACHE_OP_FLUSH;
1883 else if (op & KGSL_GPUMEM_CACHE_CLEAN)
1884 cacheop = KGSL_CACHE_OP_CLEAN;
1885 else if (op & KGSL_GPUMEM_CACHE_INV)
1886 cacheop = KGSL_CACHE_OP_INV;
1887 else {
1888 ret = -EINVAL;
1889 goto done;
1890 }
1891
1892 mode = kgsl_memdesc_get_cachemode(&entry->memdesc);
1893 if (mode != KGSL_CACHEMODE_UNCACHED
1894 && mode != KGSL_CACHEMODE_WRITECOMBINE)
1895 kgsl_cache_range_op(&entry->memdesc, cacheop);
1896
1897done:
1898 return ret;
1899}
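
/*
 * Hedged userspace sketch of driving _kgsl_gpumem_sync_cache() through
 * the new sync ioctl below; alloc_id is an assumed id returned earlier
 * by IOCTL_KGSL_GPUMEM_ALLOC_ID.
 *
 *	struct kgsl_gpumem_sync_cache sync = { 0 };
 *	sync.id = alloc_id;
 *	sync.op = KGSL_GPUMEM_CACHE_FLUSH;	// clean + invalidate
 *	ioctl(kgsl_fd, IOCTL_KGSL_GPUMEM_SYNC_CACHE, &sync);
 */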
1900
1901/* New cache sync function - supports both directions (clean and invalidate) */
1902
1903static long
1904kgsl_ioctl_gpumem_sync_cache(struct kgsl_device_private *dev_priv,
1905 unsigned int cmd, void *data)
1906{
1907 struct kgsl_gpumem_sync_cache *param = data;
1908 struct kgsl_process_private *private = dev_priv->process_priv;
1909 struct kgsl_mem_entry *entry = NULL;
1910
1911 if (param->id != 0) {
1912 entry = kgsl_sharedmem_find_id(private, param->id);
1913 if (entry == NULL) {
1914 KGSL_MEM_INFO(dev_priv->device, "can't find id %d\n",
1915 param->id);
1916 return -EINVAL;
1917 }
1918 } else if (param->gpuaddr != 0) {
1919 spin_lock(&private->mem_lock);
1920 entry = kgsl_sharedmem_find(private, param->gpuaddr);
1921 spin_unlock(&private->mem_lock);
1922 if (entry == NULL) {
1923 KGSL_MEM_INFO(dev_priv->device,
1924 "can't find gpuaddr %x\n",
1925 param->gpuaddr);
1926 return -EINVAL;
1927 }
1928 } else {
1929 return -EINVAL;
1930 }
1931
1932 return _kgsl_gpumem_sync_cache(entry, param->op);
1933}
1934
Jeremy Gebbenfec05c22013-05-28 16:59:29 -06001935/* Legacy cache function, does a flush (clean + invalidate) */
Jordan Crousee9efb0b2013-05-28 16:54:19 -06001936
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001937static long
1938kgsl_ioctl_sharedmem_flush_cache(struct kgsl_device_private *dev_priv,
1939 unsigned int cmd, void *data)
1940{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001941 struct kgsl_sharedmem_free *param = data;
1942 struct kgsl_process_private *private = dev_priv->process_priv;
Jordan Crousee9efb0b2013-05-28 16:54:19 -06001943 struct kgsl_mem_entry *entry = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001944
1945 spin_lock(&private->mem_lock);
1946 entry = kgsl_sharedmem_find(private, param->gpuaddr);
Jordan Crousee9efb0b2013-05-28 16:54:19 -06001947 spin_unlock(&private->mem_lock);
1948 if (entry == NULL) {
1949 KGSL_MEM_INFO(dev_priv->device,
1950 "can't find gpuaddr %x\n",
1951 param->gpuaddr);
1952 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001953 }
Jeremy Gebben690f9d12011-08-08 16:33:49 -06001954
Jordan Crousee9efb0b2013-05-28 16:54:19 -06001955 return _kgsl_gpumem_sync_cache(entry, KGSL_GPUMEM_CACHE_FLUSH);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001956}
1957
Jeremy Gebbena46f4272013-05-28 16:54:09 -06001958/*
1959 * The common parts of kgsl_ioctl_gpumem_alloc and kgsl_ioctl_gpumem_alloc_id.
1960 */
1961int
1962_gpumem_alloc(struct kgsl_device_private *dev_priv,
1963 struct kgsl_mem_entry **ret_entry,
1964 unsigned int size, unsigned int flags)
1965{
1966 int result;
1967 struct kgsl_process_private *private = dev_priv->process_priv;
1968 struct kgsl_mem_entry *entry;
1969
1970 /*
1971 * Mask off unknown flags from userspace. This way the caller can
1972 * check if a flag is supported by looking at the returned flags.
1973 */
1974 flags &= KGSL_MEMFLAGS_GPUREADONLY
Jordan Crousee9efb0b2013-05-28 16:54:19 -06001975 | KGSL_CACHEMODE_MASK
Jeremy Gebbena46f4272013-05-28 16:54:09 -06001976 | KGSL_MEMTYPE_MASK
Jeremy Gebbenfec05c22013-05-28 16:59:29 -06001977 | KGSL_MEMALIGN_MASK
1978 | KGSL_MEMFLAGS_USE_CPU_MAP;
Jeremy Gebbena46f4272013-05-28 16:54:09 -06001979
1980 entry = kgsl_mem_entry_create();
1981 if (entry == NULL)
1982 return -ENOMEM;
1983
1984 result = kgsl_allocate_user(&entry->memdesc, private->pagetable, size,
1985 flags);
1986 if (result != 0)
1987 goto err;
1988
1989 entry->memtype = KGSL_MEM_ENTRY_KERNEL;
1990
1991 kgsl_check_idle(dev_priv->device);
1992 *ret_entry = entry;
1993 return result;
1994err:
1995 kfree(entry);
1996 *ret_entry = NULL;
1997 return result;
1998}
1999
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002000static long
2001kgsl_ioctl_gpumem_alloc(struct kgsl_device_private *dev_priv,
2002 unsigned int cmd, void *data)
2003{
2004 struct kgsl_process_private *private = dev_priv->process_priv;
2005 struct kgsl_gpumem_alloc *param = data;
Jeremy Gebbena46f4272013-05-28 16:54:09 -06002006 struct kgsl_mem_entry *entry = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002007 int result;
2008
Jeremy Gebbenfec05c22013-05-28 16:59:29 -06002009 param->flags &= ~KGSL_MEMFLAGS_USE_CPU_MAP;
Jeremy Gebbena46f4272013-05-28 16:54:09 -06002010 result = _gpumem_alloc(dev_priv, &entry, param->size, param->flags);
2011 if (result)
2012 return result;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002013
Jeremy Gebbena46f4272013-05-28 16:54:09 -06002014 result = kgsl_mmu_map(private->pagetable, &entry->memdesc,
2015 kgsl_memdesc_protflags(&entry->memdesc));
2016 if (result)
2017 goto err;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002018
Jeremy Gebbena46f4272013-05-28 16:54:09 -06002019 result = kgsl_mem_entry_attach_process(entry, private);
2020 if (result != 0)
2021 goto err;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002022
Jeremy Gebbena46f4272013-05-28 16:54:09 -06002023 kgsl_process_add_stats(private, entry->memtype, param->size);
2024 trace_kgsl_mem_alloc(entry);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002025
Jeremy Gebbena46f4272013-05-28 16:54:09 -06002026 param->gpuaddr = entry->memdesc.gpuaddr;
2027 param->size = entry->memdesc.size;
2028 param->flags = entry->memdesc.flags;
2029 return result;
2030err:
2031 kgsl_sharedmem_free(&entry->memdesc);
2032 kfree(entry);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002033 return result;
2034}
Jeremy Gebbena46f4272013-05-28 16:54:09 -06002035
2036static long
2037kgsl_ioctl_gpumem_alloc_id(struct kgsl_device_private *dev_priv,
2038 unsigned int cmd, void *data)
2039{
2040 struct kgsl_process_private *private = dev_priv->process_priv;
2041 struct kgsl_gpumem_alloc_id *param = data;
2042 struct kgsl_mem_entry *entry = NULL;
2043 int result;
2044
Jeremy Gebbenfec05c22013-05-28 16:59:29 -06002045 if (!can_use_cpu_map())
2046 param->flags &= ~KGSL_MEMFLAGS_USE_CPU_MAP;
2047
Jeremy Gebbena46f4272013-05-28 16:54:09 -06002048 result = _gpumem_alloc(dev_priv, &entry, param->size, param->flags);
2049 if (result != 0)
2050 goto err;
2051
Jeremy Gebbenfec05c22013-05-28 16:59:29 -06002052 if (!kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
2053 result = kgsl_mmu_map(private->pagetable, &entry->memdesc,
2054 kgsl_memdesc_protflags(&entry->memdesc));
2055 if (result)
2056 goto err;
2057 }
Jeremy Gebbena46f4272013-05-28 16:54:09 -06002058
2059 result = kgsl_mem_entry_attach_process(entry, private);
2060 if (result != 0)
2061 goto err;
2062
2063 kgsl_process_add_stats(private, entry->memtype, param->size);
2064 trace_kgsl_mem_alloc(entry);
2065
2066 param->id = entry->id;
2067 param->flags = entry->memdesc.flags;
2068 param->size = entry->memdesc.size;
Jeremy Gebbenfec05c22013-05-28 16:59:29 -06002069 param->mmapsize = kgsl_memdesc_mmapsize(&entry->memdesc);
Jeremy Gebbena46f4272013-05-28 16:54:09 -06002070 param->gpuaddr = entry->memdesc.gpuaddr;
2071 return result;
2072err:
2073 if (entry)
2074 kgsl_sharedmem_free(&entry->memdesc);
2075 kfree(entry);
2076 return result;
2077}
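
/*
 * Hedged userspace sketch tying IOCTL_KGSL_GPUMEM_ALLOC_ID to the mmap
 * path later in this file: the returned id is passed as the mmap offset
 * (id << PAGE_SHIFT; 12 is assumed here for 4K pages) and is looked up
 * by get_mmap_entry(). The size is an illustrative assumption.
 *
 *	struct kgsl_gpumem_alloc_id alloc = { 0 };
 *	alloc.size = 1 << 20;
 *	alloc.flags = KGSL_MEMFLAGS_USE_CPU_MAP;
 *	ioctl(kgsl_fd, IOCTL_KGSL_GPUMEM_ALLOC_ID, &alloc);
 *	void *cpu = mmap(NULL, alloc.mmapsize, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, kgsl_fd, alloc.id << 12);
 *	// if USE_CPU_MAP survived, the GPU address will match
 *	// (unsigned long) cpu once kgsl_mmap() maps it
 */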
2078
2079static long
2080kgsl_ioctl_gpumem_get_info(struct kgsl_device_private *dev_priv,
2081 unsigned int cmd, void *data)
2082{
2083 struct kgsl_process_private *private = dev_priv->process_priv;
2084 struct kgsl_gpumem_get_info *param = data;
2085 struct kgsl_mem_entry *entry = NULL;
2086 int result = 0;
2087
2088 if (param->id != 0) {
2089 entry = kgsl_sharedmem_find_id(private, param->id);
2090 if (entry == NULL) {
2091 KGSL_MEM_INFO(dev_priv->device, "can't find id %d\n",
2092 param->id);
2093 return -EINVAL;
2094 }
2095 } else if (param->gpuaddr != 0) {
2096 spin_lock(&private->mem_lock);
2097 entry = kgsl_sharedmem_find(private, param->gpuaddr);
2098 spin_unlock(&private->mem_lock);
2099 if (entry == NULL) {
2100 KGSL_MEM_INFO(dev_priv->device,
2101 "can't find gpuaddr %lx\n",
2102 param->gpuaddr);
2103 return -EINVAL;
2104 }
2105 } else {
2106 return -EINVAL;
2107 }
2108 param->gpuaddr = entry->memdesc.gpuaddr;
2109 param->id = entry->id;
2110 param->flags = entry->memdesc.flags;
2111 param->size = entry->memdesc.size;
Jeremy Gebbenfec05c22013-05-28 16:59:29 -06002112 param->mmapsize = kgsl_memdesc_mmapsize(&entry->memdesc);
Jeremy Gebbena46f4272013-05-28 16:54:09 -06002113 param->useraddr = entry->memdesc.useraddr;
2114 return result;
2115}
2116
Jeremy Gebbena7423e42011-04-18 15:11:21 -06002117static long kgsl_ioctl_cff_syncmem(struct kgsl_device_private *dev_priv,
2118 unsigned int cmd, void *data)
2119{
2120 int result = 0;
2121 struct kgsl_cff_syncmem *param = data;
2122 struct kgsl_process_private *private = dev_priv->process_priv;
2123 struct kgsl_mem_entry *entry = NULL;
2124
2125 spin_lock(&private->mem_lock);
2126 entry = kgsl_sharedmem_find_region(private, param->gpuaddr, param->len);
2127 if (entry)
2128 kgsl_cffdump_syncmem(dev_priv, &entry->memdesc, param->gpuaddr,
2129 param->len, true);
2130 else
2131 result = -EINVAL;
2132 spin_unlock(&private->mem_lock);
2133 return result;
2134}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002135
Sushmita Susheelendra41f8fa32011-05-11 17:15:58 -06002136static long kgsl_ioctl_cff_user_event(struct kgsl_device_private *dev_priv,
2137 unsigned int cmd, void *data)
2138{
2139 int result = 0;
2140 struct kgsl_cff_user_event *param = data;
2141
2142 kgsl_cffdump_user_event(param->cff_opcode, param->op1, param->op2,
2143 param->op3, param->op4, param->op5);
2144
2145 return result;
2146}
2147
Jordan Croused4bc9d22011-11-17 13:39:21 -07002148#ifdef CONFIG_GENLOCK
2149struct kgsl_genlock_event_priv {
2150 struct genlock_handle *handle;
2151 struct genlock *lock;
2152};
2153
2154/**
2155 * kgsl_genlock_event_cb - Event callback for a genlock timestamp event
2156 * @device - The KGSL device that expired the timestamp
2157 * @priv - private data for the event
Carter Cooper7e7f02e2012-02-15 09:36:31 -07002158 * @context_id - the context id that goes with the timestamp
Jordan Croused4bc9d22011-11-17 13:39:21 -07002159 * @timestamp - the timestamp that triggered the event
2160 *
2161 * Release a genlock lock following the expiration of a timestamp
2162 */
2163
2164static void kgsl_genlock_event_cb(struct kgsl_device *device,
Carter Cooper7e7f02e2012-02-15 09:36:31 -07002165 void *priv, u32 context_id, u32 timestamp)
Jordan Croused4bc9d22011-11-17 13:39:21 -07002166{
2167 struct kgsl_genlock_event_priv *ev = priv;
2168 int ret;
2169
2170 ret = genlock_lock(ev->handle, GENLOCK_UNLOCK, 0, 0);
2171 if (ret)
2172 KGSL_CORE_ERR("Error while unlocking genlock: %d\n", ret);
2173
2174 genlock_put_handle(ev->handle);
2175
2176 kfree(ev);
2177}
2178
2179/**
2180 * kgsl_add_genlock-event - Create a new genlock event
2181 * @device - KGSL device to create the event on
2182 * @timestamp - Timestamp to trigger the event
 2183 * @data - User space buffer containing struct kgsl_timestamp_event_genlock
2184 * @len - length of the userspace buffer
Jeremy Gebbenfd87f9a2012-02-10 07:06:09 -07002185 * @owner - driver instance that owns this event
Jordan Croused4bc9d22011-11-17 13:39:21 -07002186 * @returns 0 on success or error code on error
2187 *
 2188 * Attach to a genlock handle and register an event to release the
2189 * genlock lock when the timestamp expires
2190 */
2191
2192static int kgsl_add_genlock_event(struct kgsl_device *device,
Carter Cooper7e7f02e2012-02-15 09:36:31 -07002193 u32 context_id, u32 timestamp, void __user *data, int len,
Jeremy Gebbenfd87f9a2012-02-10 07:06:09 -07002194 struct kgsl_device_private *owner)
Jordan Croused4bc9d22011-11-17 13:39:21 -07002195{
2196 struct kgsl_genlock_event_priv *event;
2197 struct kgsl_timestamp_event_genlock priv;
2198 int ret;
2199
2200 if (len != sizeof(priv))
2201 return -EINVAL;
2202
2203 if (copy_from_user(&priv, data, sizeof(priv)))
2204 return -EFAULT;
2205
2206 event = kzalloc(sizeof(*event), GFP_KERNEL);
2207
2208 if (event == NULL)
2209 return -ENOMEM;
2210
2211 event->handle = genlock_get_handle_fd(priv.handle);
2212
2213 if (IS_ERR(event->handle)) {
2214 int ret = PTR_ERR(event->handle);
2215 kfree(event);
2216 return ret;
2217 }
2218
Carter Cooper7e7f02e2012-02-15 09:36:31 -07002219 ret = kgsl_add_event(device, context_id, timestamp,
2220 kgsl_genlock_event_cb, event, owner);
Jordan Croused4bc9d22011-11-17 13:39:21 -07002221 if (ret)
2222 kfree(event);
2223
2224 return ret;
2225}
2226#else
2227static long kgsl_add_genlock_event(struct kgsl_device *device,
Carter Cooper7e7f02e2012-02-15 09:36:31 -07002228 u32 context_id, u32 timestamp, void __user *data, int len,
Jeremy Gebbenfd87f9a2012-02-10 07:06:09 -07002229 struct kgsl_device_private *owner)
Jordan Croused4bc9d22011-11-17 13:39:21 -07002230{
2231 return -EINVAL;
2232}
2233#endif
2234
2235/**
2236 * kgsl_ioctl_timestamp_event - Register a new timestamp event from userspace
2237 * @dev_priv - pointer to the private device structure
2238 * @cmd - the ioctl cmd passed from kgsl_ioctl
2239 * @data - the user data buffer from kgsl_ioctl
2240 * @returns 0 on success or error code on failure
2241 */
2242
2243static long kgsl_ioctl_timestamp_event(struct kgsl_device_private *dev_priv,
2244 unsigned int cmd, void *data)
2245{
2246 struct kgsl_timestamp_event *param = data;
2247 int ret;
2248
2249 switch (param->type) {
2250 case KGSL_TIMESTAMP_EVENT_GENLOCK:
2251 ret = kgsl_add_genlock_event(dev_priv->device,
Carter Cooper7e7f02e2012-02-15 09:36:31 -07002252 param->context_id, param->timestamp, param->priv,
2253 param->len, dev_priv);
Jordan Croused4bc9d22011-11-17 13:39:21 -07002254 break;
Jeff Boodyfe6c39c2012-08-09 13:54:50 -06002255 case KGSL_TIMESTAMP_EVENT_FENCE:
2256 ret = kgsl_add_fence_event(dev_priv->device,
2257 param->context_id, param->timestamp, param->priv,
2258 param->len, dev_priv);
2259 break;
Jordan Croused4bc9d22011-11-17 13:39:21 -07002260 default:
2261 ret = -EINVAL;
2262 }
2263
2264 return ret;
2265}
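
/*
 * Hedged userspace sketch of kgsl_ioctl_timestamp_event() above for the
 * genlock case; genlock_fd, ctxt_id and ts are assumed values obtained
 * elsewhere.
 *
 *	struct kgsl_timestamp_event_genlock gl = { .handle = genlock_fd };
 *	struct kgsl_timestamp_event ev = { 0 };
 *	ev.type = KGSL_TIMESTAMP_EVENT_GENLOCK;
 *	ev.context_id = ctxt_id;
 *	ev.timestamp = ts;
 *	ev.priv = &gl;
 *	ev.len = sizeof(gl);
 *	ioctl(kgsl_fd, IOCTL_KGSL_TIMESTAMP_EVENT, &ev);
 */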
2266
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002267typedef long (*kgsl_ioctl_func_t)(struct kgsl_device_private *,
2268 unsigned int, void *);
2269
Vladimir Razgulin38345302013-01-22 18:41:59 -07002270#define KGSL_IOCTL_FUNC(_cmd, _func, _flags) \
2271 [_IOC_NR((_cmd))] = \
2272 { .cmd = (_cmd), .func = (_func), .flags = (_flags) }
2273
2274#define KGSL_IOCTL_LOCK BIT(0)
2275#define KGSL_IOCTL_WAKE BIT(1)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002276
2277static const struct {
2278 unsigned int cmd;
2279 kgsl_ioctl_func_t func;
Vladimir Razgulin38345302013-01-22 18:41:59 -07002280 int flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002281} kgsl_ioctl_funcs[] = {
2282 KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_GETPROPERTY,
Vladimir Razgulin38345302013-01-22 18:41:59 -07002283 kgsl_ioctl_device_getproperty,
2284 KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002285 KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP,
Vladimir Razgulin38345302013-01-22 18:41:59 -07002286 kgsl_ioctl_device_waittimestamp,
2287 KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
Carter Cooper7e7f02e2012-02-15 09:36:31 -07002288 KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID,
Vladimir Razgulin38345302013-01-22 18:41:59 -07002289 kgsl_ioctl_device_waittimestamp_ctxtid,
2290 KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002291 KGSL_IOCTL_FUNC(IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS,
Vladimir Razgulin38345302013-01-22 18:41:59 -07002292 kgsl_ioctl_rb_issueibcmds,
2293 KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002294 KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP,
Vladimir Razgulin38345302013-01-22 18:41:59 -07002295 kgsl_ioctl_cmdstream_readtimestamp,
2296 KGSL_IOCTL_LOCK),
Carter Cooper7e7f02e2012-02-15 09:36:31 -07002297 KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID,
Vladimir Razgulin38345302013-01-22 18:41:59 -07002298 kgsl_ioctl_cmdstream_readtimestamp_ctxtid,
2299 KGSL_IOCTL_LOCK),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002300 KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP,
Vladimir Razgulin38345302013-01-22 18:41:59 -07002301 kgsl_ioctl_cmdstream_freememontimestamp,
2302 KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
Carter Cooper7e7f02e2012-02-15 09:36:31 -07002303 KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID,
Vladimir Razgulin38345302013-01-22 18:41:59 -07002304 kgsl_ioctl_cmdstream_freememontimestamp_ctxtid,
2305 KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002306 KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_CREATE,
Vladimir Razgulin38345302013-01-22 18:41:59 -07002307 kgsl_ioctl_drawctxt_create,
2308 KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002309 KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_DESTROY,
Vladimir Razgulin38345302013-01-22 18:41:59 -07002310 kgsl_ioctl_drawctxt_destroy,
2311 KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002312 KGSL_IOCTL_FUNC(IOCTL_KGSL_MAP_USER_MEM,
2313 kgsl_ioctl_map_user_mem, 0),
2314 KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FROM_PMEM,
2315 kgsl_ioctl_map_user_mem, 0),
2316 KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FREE,
2317 kgsl_ioctl_sharedmem_free, 0),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002318 KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE,
2319 kgsl_ioctl_sharedmem_flush_cache, 0),
2320 KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC,
2321 kgsl_ioctl_gpumem_alloc, 0),
Jeremy Gebbena7423e42011-04-18 15:11:21 -06002322 KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_SYNCMEM,
2323 kgsl_ioctl_cff_syncmem, 0),
Sushmita Susheelendra41f8fa32011-05-11 17:15:58 -06002324 KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_USER_EVENT,
2325 kgsl_ioctl_cff_user_event, 0),
Jordan Croused4bc9d22011-11-17 13:39:21 -07002326 KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMESTAMP_EVENT,
Vladimir Razgulin38345302013-01-22 18:41:59 -07002327 kgsl_ioctl_timestamp_event,
2328 KGSL_IOCTL_LOCK),
Jordan Crouseed7dd7f2012-03-29 13:16:02 -06002329 KGSL_IOCTL_FUNC(IOCTL_KGSL_SETPROPERTY,
Vladimir Razgulin38345302013-01-22 18:41:59 -07002330 kgsl_ioctl_device_setproperty,
Jeremy Gebbena46f4272013-05-28 16:54:09 -06002331 KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
2332 KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC_ID,
2333 kgsl_ioctl_gpumem_alloc_id, 0),
2334 KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_FREE_ID,
2335 kgsl_ioctl_gpumem_free_id, 0),
2336 KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_GET_INFO,
2337 kgsl_ioctl_gpumem_get_info, 0),
Jordan Crousee9efb0b2013-05-28 16:54:19 -06002338 KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_SYNC_CACHE,
2339 kgsl_ioctl_gpumem_sync_cache, 0),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002340};
2341
2342static long kgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
2343{
2344 struct kgsl_device_private *dev_priv = filep->private_data;
Jordan Crouse1e76f612012-08-08 13:24:21 -06002345 unsigned int nr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002346 kgsl_ioctl_func_t func;
Vladimir Razgulin38345302013-01-22 18:41:59 -07002347 int lock, ret, use_hw;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002348 char ustack[64];
2349 void *uptr = NULL;
2350
2351 BUG_ON(dev_priv == NULL);
2352
 2353	/* Workaround for a previously incorrectly defined ioctl code.
 2354	This helps ensure binary compatibility */
2355
2356 if (cmd == IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD)
2357 cmd = IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP;
Jason Varbedian80ba33d2011-07-11 17:29:05 -07002358 else if (cmd == IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD)
2359 cmd = IOCTL_KGSL_CMDSTREAM_READTIMESTAMP;
Jeff Boodyfe6c39c2012-08-09 13:54:50 -06002360 else if (cmd == IOCTL_KGSL_TIMESTAMP_EVENT_OLD)
2361 cmd = IOCTL_KGSL_TIMESTAMP_EVENT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002362
Jordan Crouse1e76f612012-08-08 13:24:21 -06002363 nr = _IOC_NR(cmd);
2364
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002365 if (cmd & (IOC_IN | IOC_OUT)) {
2366 if (_IOC_SIZE(cmd) < sizeof(ustack))
2367 uptr = ustack;
2368 else {
2369 uptr = kzalloc(_IOC_SIZE(cmd), GFP_KERNEL);
2370 if (uptr == NULL) {
2371 KGSL_MEM_ERR(dev_priv->device,
2372 "kzalloc(%d) failed\n", _IOC_SIZE(cmd));
2373 ret = -ENOMEM;
2374 goto done;
2375 }
2376 }
2377
2378 if (cmd & IOC_IN) {
2379 if (copy_from_user(uptr, (void __user *) arg,
2380 _IOC_SIZE(cmd))) {
2381 ret = -EFAULT;
2382 goto done;
2383 }
2384 } else
2385 memset(uptr, 0, _IOC_SIZE(cmd));
2386 }
2387
2388 if (nr < ARRAY_SIZE(kgsl_ioctl_funcs) &&
Jordan Crouse1e76f612012-08-08 13:24:21 -06002389 kgsl_ioctl_funcs[nr].func != NULL) {
2390
2391 /*
2392 * Make sure that nobody tried to send us a malformed ioctl code
2393 * with a valid NR but bogus flags
2394 */
2395
2396 if (kgsl_ioctl_funcs[nr].cmd != cmd) {
2397 KGSL_DRV_ERR(dev_priv->device,
2398 "Malformed ioctl code %08x\n", cmd);
2399 ret = -ENOIOCTLCMD;
2400 goto done;
2401 }
2402
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002403 func = kgsl_ioctl_funcs[nr].func;
Vladimir Razgulin38345302013-01-22 18:41:59 -07002404 lock = kgsl_ioctl_funcs[nr].flags & KGSL_IOCTL_LOCK;
2405 use_hw = kgsl_ioctl_funcs[nr].flags & KGSL_IOCTL_WAKE;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002406 } else {
2407 func = dev_priv->device->ftbl->ioctl;
2408 if (!func) {
2409 KGSL_DRV_INFO(dev_priv->device,
2410 "invalid ioctl code %08x\n", cmd);
Jeremy Gebbenc15b4612012-01-09 09:44:11 -07002411 ret = -ENOIOCTLCMD;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002412 goto done;
2413 }
2414 lock = 1;
Vladimir Razgulin38345302013-01-22 18:41:59 -07002415 use_hw = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002416 }
2417
2418 if (lock) {
2419 mutex_lock(&dev_priv->device->mutex);
Vladimir Razgulin38345302013-01-22 18:41:59 -07002420 if (use_hw)
2421 kgsl_check_suspended(dev_priv->device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002422 }
2423
2424 ret = func(dev_priv, cmd, uptr);
2425
2426 if (lock) {
2427 kgsl_check_idle_locked(dev_priv->device);
2428 mutex_unlock(&dev_priv->device->mutex);
2429 }
2430
2431 if (ret == 0 && (cmd & IOC_OUT)) {
2432 if (copy_to_user((void __user *) arg, uptr, _IOC_SIZE(cmd)))
2433 ret = -EFAULT;
2434 }
2435
2436done:
2437 if (_IOC_SIZE(cmd) >= sizeof(ustack))
2438 kfree(uptr);
2439
2440 return ret;
2441}
2442
2443static int
2444kgsl_mmap_memstore(struct kgsl_device *device, struct vm_area_struct *vma)
2445{
2446 struct kgsl_memdesc *memdesc = &device->memstore;
2447 int result;
2448 unsigned int vma_size = vma->vm_end - vma->vm_start;
2449
2450 /* The memstore can only be mapped as read only */
2451
2452 if (vma->vm_flags & VM_WRITE)
2453 return -EPERM;
2454
2455 if (memdesc->size != vma_size) {
2456 KGSL_MEM_ERR(device, "memstore bad size: %d should be %d\n",
2457 vma_size, memdesc->size);
2458 return -EINVAL;
2459 }
2460
2461 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2462
Shubhraprakash Das87f68132012-07-30 23:25:13 -07002463 result = remap_pfn_range(vma, vma->vm_start,
2464 device->memstore.physaddr >> PAGE_SHIFT,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002465 vma_size, vma->vm_page_prot);
2466 if (result != 0)
2467 KGSL_MEM_ERR(device, "remap_pfn_range failed: %d\n",
2468 result);
2469
2470 return result;
2471}
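
/*
 * Hedged userspace sketch of the read-only memstore mapping handled
 * above; the mmap offset must equal device->memstore.gpuaddr, which
 * userspace is assumed to have learned elsewhere (e.g. through
 * IOCTL_KGSL_DEVICE_GETPROPERTY).
 *
 *	void *ms = mmap(NULL, memstore_size, PROT_READ, MAP_SHARED,
 *			kgsl_fd, memstore_gpuaddr);
 */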
2472
Jordan Crouse4283e172011-09-26 14:45:47 -06002473/*
2474 * kgsl_gpumem_vm_open is called whenever a vma region is copied or split.
2475 * Increase the refcount to make sure that the accounting stays correct
2476 */
2477
2478static void kgsl_gpumem_vm_open(struct vm_area_struct *vma)
2479{
2480 struct kgsl_mem_entry *entry = vma->vm_private_data;
2481 kgsl_mem_entry_get(entry);
2482}
2483
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002484static int
2485kgsl_gpumem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2486{
2487 struct kgsl_mem_entry *entry = vma->vm_private_data;
2488
Jordan Croused17e9aa2011-10-12 16:57:48 -06002489 if (!entry->memdesc.ops || !entry->memdesc.ops->vmfault)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002490 return VM_FAULT_SIGBUS;
2491
2492 return entry->memdesc.ops->vmfault(&entry->memdesc, vma, vmf);
2493}
2494
2495static void
2496kgsl_gpumem_vm_close(struct vm_area_struct *vma)
2497{
2498 struct kgsl_mem_entry *entry = vma->vm_private_data;
Jeremy Gebbencc0c7092013-05-28 16:53:53 -06002499
2500 entry->memdesc.useraddr = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002501 kgsl_mem_entry_put(entry);
2502}
2503
2504static struct vm_operations_struct kgsl_gpumem_vm_ops = {
Jordan Crouse4283e172011-09-26 14:45:47 -06002505 .open = kgsl_gpumem_vm_open,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002506 .fault = kgsl_gpumem_vm_fault,
2507 .close = kgsl_gpumem_vm_close,
2508};
2509
Jeremy Gebbenfec05c22013-05-28 16:59:29 -06002510static int
2511get_mmap_entry(struct kgsl_process_private *private,
2512 struct kgsl_mem_entry **out_entry, unsigned long pgoff,
2513 unsigned long len)
2514{
2515 int ret = -EINVAL;
2516 struct kgsl_mem_entry *entry;
2517
2518 entry = kgsl_sharedmem_find_id(private, pgoff);
2519 if (entry == NULL) {
2520 spin_lock(&private->mem_lock);
2521 entry = kgsl_sharedmem_find(private, pgoff << PAGE_SHIFT);
2522 spin_unlock(&private->mem_lock);
2523 }
2524
2525 if (!entry)
2526 return -EINVAL;
2527
2528 kgsl_mem_entry_get(entry);
2529
2530 if (!entry->memdesc.ops ||
2531 !entry->memdesc.ops->vmflags ||
2532 !entry->memdesc.ops->vmfault) {
2533 ret = -EINVAL;
2534 goto err_put;
2535 }
2536
2537 if (entry->memdesc.useraddr != 0) {
2538 ret = -EBUSY;
2539 goto err_put;
2540 }
2541
2542 if (len != kgsl_memdesc_mmapsize(&entry->memdesc)) {
2543 ret = -ERANGE;
2544 goto err_put;
2545 }
2546
2547 *out_entry = entry;
2548 return 0;
2549err_put:
2550 kgsl_mem_entry_put(entry);
2551 return ret;
2552}
2553
2554static unsigned long
2555kgsl_get_unmapped_area(struct file *file, unsigned long addr,
2556 unsigned long len, unsigned long pgoff,
2557 unsigned long flags)
2558{
2559 unsigned long ret = 0;
2560 unsigned long vma_offset = pgoff << PAGE_SHIFT;
2561 struct kgsl_device_private *dev_priv = file->private_data;
2562 struct kgsl_process_private *private = dev_priv->process_priv;
2563 struct kgsl_device *device = dev_priv->device;
2564 struct kgsl_mem_entry *entry = NULL;
2565 unsigned int align;
2566 unsigned int retry = 0;
2567
2568 if (vma_offset == device->memstore.gpuaddr)
2569 return get_unmapped_area(NULL, addr, len, pgoff, flags);
2570
2571 ret = get_mmap_entry(private, &entry, pgoff, len);
2572 if (ret)
2573 return ret;
2574
2575 if (!kgsl_memdesc_use_cpu_map(&entry->memdesc) || (flags & MAP_FIXED)) {
2576 /*
2577 * If we're not going to use the same mapping on the gpu,
2578 * any address is fine.
2579 * For MAP_FIXED, hopefully the caller knows what they're doing,
2580 * but we may fail in mmap() if there is already something
2581 * at the virtual address chosen.
2582 */
2583 ret = get_unmapped_area(NULL, addr, len, pgoff, flags);
2584 goto put;
2585 }
2586 if (entry->memdesc.gpuaddr != 0) {
2587 KGSL_MEM_INFO(device,
2588 "pgoff %lx already mapped to gpuaddr %x\n",
2589 pgoff, entry->memdesc.gpuaddr);
2590 ret = -EBUSY;
2591 goto put;
2592 }
2593
2594 align = kgsl_memdesc_get_align(&entry->memdesc);
2595 if (align >= ilog2(SZ_1M))
2596 align = ilog2(SZ_1M);
2597 else if (align >= ilog2(SZ_64K))
2598 align = ilog2(SZ_64K);
2599 else if (align <= PAGE_SHIFT)
2600 align = 0;
2601
2602 if (align)
2603 len += 1 << align;
2604 do {
2605 ret = get_unmapped_area(NULL, addr, len, pgoff, flags);
2606 if (IS_ERR_VALUE(ret))
2607 break;
2608 if (align)
2609 ret = ALIGN(ret, (1 << align));
2610
 2611		/* make sure there isn't a GPU-only mapping at this address */
2612 if (kgsl_sharedmem_region_empty(private, ret, len))
2613 break;
2614
2615 trace_kgsl_mem_unmapped_area_collision(entry, addr, len, ret);
2616
2617 /*
2618 * If we collided, bump the hint address so that
 2619		 * get_unmapped_area knows to look somewhere else.
2620 */
2621 addr = (addr == 0) ? ret + len : addr + len;
2622
2623 /*
2624 * The addr hint can be set by userspace to be near
2625 * the end of the address space. Make sure we search
2626 * the whole address space at least once by wrapping
2627 * back around once.
2628 */
2629 if (!retry && (addr + len >= TASK_SIZE)) {
2630 addr = 0;
2631 retry = 1;
2632 } else {
2633 ret = -EBUSY;
2634 }
2635 } while (addr + len < TASK_SIZE);
2636
2637 if (IS_ERR_VALUE(ret))
2638 KGSL_MEM_INFO(device,
2639 "pid %d pgoff %lx len %ld failed error %ld\n",
2640 private->pid, pgoff, len, ret);
2641put:
2642 kgsl_mem_entry_put(entry);
2643 return ret;
2644}
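
/*
 * Worked example of the search above (illustrative numbers): for a
 * 64K-aligned request (align = 16) of len 0x20000, get_unmapped_area()
 * is asked for len + (1 << 16) bytes so that ALIGN(ret, 64K) still lands
 * inside the hole it returns. If the aligned address collides with a
 * GPU-only mapping, the hint is bumped past that hole and the loop
 * retries, wrapping the hint to 0 once before failing with -EBUSY.
 */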
2645
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002646static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
2647{
Jordan Crousee9efb0b2013-05-28 16:54:19 -06002648 unsigned int ret, cache;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002649 unsigned long vma_offset = vma->vm_pgoff << PAGE_SHIFT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002650 struct kgsl_device_private *dev_priv = file->private_data;
2651 struct kgsl_process_private *private = dev_priv->process_priv;
Jordan Crousec9559e42012-04-05 16:55:56 -06002652 struct kgsl_mem_entry *entry = NULL;
Jordan Crouse2db0af92011-08-08 16:05:09 -06002653 struct kgsl_device *device = dev_priv->device;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002654
 2655	/* Handle legacy behavior for memstore */
2656
Shubhraprakash Das87f68132012-07-30 23:25:13 -07002657 if (vma_offset == device->memstore.gpuaddr)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002658 return kgsl_mmap_memstore(device, vma);
2659
Jeremy Gebbenfec05c22013-05-28 16:59:29 -06002660 ret = get_mmap_entry(private, &entry, vma->vm_pgoff,
2661 vma->vm_end - vma->vm_start);
2662 if (ret)
2663 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002664
Jeremy Gebbenfec05c22013-05-28 16:59:29 -06002665 if (kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
2666 entry->memdesc.gpuaddr = vma->vm_start;
Jordan Crousec9559e42012-04-05 16:55:56 -06002667
Jeremy Gebbenfec05c22013-05-28 16:59:29 -06002668 ret = kgsl_mmu_map(private->pagetable, &entry->memdesc,
2669 kgsl_memdesc_protflags(&entry->memdesc));
2670 if (ret) {
2671 kgsl_mem_entry_put(entry);
2672 return ret;
2673 }
2674 kgsl_mem_entry_track_gpuaddr(private, entry);
Jeremy Gebbencc0c7092013-05-28 16:53:53 -06002675 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002676
2677 vma->vm_flags |= entry->memdesc.ops->vmflags(&entry->memdesc);
2678
2679 vma->vm_private_data = entry;
Jordan Crousee9efb0b2013-05-28 16:54:19 -06002680
2681 /* Determine user-side caching policy */
2682
2683 cache = kgsl_memdesc_get_cachemode(&entry->memdesc);
2684
2685 switch (cache) {
2686 case KGSL_CACHEMODE_UNCACHED:
2687 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2688 break;
2689 case KGSL_CACHEMODE_WRITETHROUGH:
2690 vma->vm_page_prot = pgprot_writethroughcache(vma->vm_page_prot);
2691 break;
2692 case KGSL_CACHEMODE_WRITEBACK:
2693 vma->vm_page_prot = pgprot_writebackcache(vma->vm_page_prot);
2694 break;
2695 case KGSL_CACHEMODE_WRITECOMBINE:
2696 default:
2697 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
2698 break;
2699 }
2700
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002701 vma->vm_ops = &kgsl_gpumem_vm_ops;
2702 vma->vm_file = file;
2703
Jeremy Gebbencc0c7092013-05-28 16:53:53 -06002704 entry->memdesc.useraddr = vma->vm_start;
2705
Jeremy Gebbenfec05c22013-05-28 16:59:29 -06002706 trace_kgsl_mem_mmap(entry);
2707
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002708 return 0;
2709}
2710
Jordan Crouseb368e9b2012-04-27 14:01:59 -06002711static irqreturn_t kgsl_irq_handler(int irq, void *data)
2712{
2713 struct kgsl_device *device = data;
2714
2715 return device->ftbl->irq_handler(device);
2716
2717}
2718
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002719static const struct file_operations kgsl_fops = {
2720 .owner = THIS_MODULE,
2721 .release = kgsl_release,
2722 .open = kgsl_open,
2723 .mmap = kgsl_mmap,
Jeremy Gebbenfec05c22013-05-28 16:59:29 -06002724 .get_unmapped_area = kgsl_get_unmapped_area,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002725 .unlocked_ioctl = kgsl_ioctl,
2726};
2727
2728struct kgsl_driver kgsl_driver = {
2729 .process_mutex = __MUTEX_INITIALIZER(kgsl_driver.process_mutex),
2730 .ptlock = __SPIN_LOCK_UNLOCKED(kgsl_driver.ptlock),
2731 .devlock = __MUTEX_INITIALIZER(kgsl_driver.devlock),
2732};
2733EXPORT_SYMBOL(kgsl_driver);
2734
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002735static void _unregister_device(struct kgsl_device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002736{
2737 int minor;
2738
2739 mutex_lock(&kgsl_driver.devlock);
2740 for (minor = 0; minor < KGSL_DEVICE_MAX; minor++) {
2741 if (device == kgsl_driver.devp[minor])
2742 break;
2743 }
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002744 if (minor != KGSL_DEVICE_MAX) {
2745 device_destroy(kgsl_driver.class,
2746 MKDEV(MAJOR(kgsl_driver.major), minor));
2747 kgsl_driver.devp[minor] = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002748 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002749 mutex_unlock(&kgsl_driver.devlock);
2750}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002751
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002752static int _register_device(struct kgsl_device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002753{
2754 int minor, ret;
2755 dev_t dev;
2756
2757 /* Find a minor for the device */
2758
2759 mutex_lock(&kgsl_driver.devlock);
2760 for (minor = 0; minor < KGSL_DEVICE_MAX; minor++) {
2761 if (kgsl_driver.devp[minor] == NULL) {
2762 kgsl_driver.devp[minor] = device;
2763 break;
2764 }
2765 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002766 mutex_unlock(&kgsl_driver.devlock);
2767
2768 if (minor == KGSL_DEVICE_MAX) {
2769 KGSL_CORE_ERR("minor devices exhausted\n");
2770 return -ENODEV;
2771 }
2772
2773 /* Create the device */
2774 dev = MKDEV(MAJOR(kgsl_driver.major), minor);
2775 device->dev = device_create(kgsl_driver.class,
2776 device->parentdev,
2777 dev, device,
2778 device->name);
2779
2780 if (IS_ERR(device->dev)) {
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002781 mutex_lock(&kgsl_driver.devlock);
2782 kgsl_driver.devp[minor] = NULL;
2783 mutex_unlock(&kgsl_driver.devlock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002784 ret = PTR_ERR(device->dev);
2785 KGSL_CORE_ERR("device_create(%s): %d\n", device->name, ret);
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002786 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002787 }
2788
2789 dev_set_drvdata(device->parentdev, device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002790 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002791}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002792
Jordan Crouseb368e9b2012-04-27 14:01:59 -06002793int kgsl_device_platform_probe(struct kgsl_device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002794{
Michael Street8bacdd02012-01-05 14:55:01 -08002795 int result;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002796 int status = -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002797 struct resource *res;
2798 struct platform_device *pdev =
2799 container_of(device->parentdev, struct platform_device, dev);
2800
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002801 status = _register_device(device);
2802 if (status)
2803 return status;
2804
2805 /* Initialize logging first, so that failures below actually print. */
2806 kgsl_device_debugfs_init(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002807
2808 status = kgsl_pwrctrl_init(device);
2809 if (status)
2810 goto error;
2811
Harsh Vardhan Dwivedif48af7f2012-04-13 12:50:44 -06002812 kgsl_ion_client = msm_ion_client_create(UINT_MAX, KGSL_NAME);
2813
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002814 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2815 device->iomemname);
2816 if (res == NULL) {
2817 KGSL_DRV_ERR(device, "platform_get_resource_byname failed\n");
2818 status = -EINVAL;
2819 goto error_pwrctrl_close;
2820 }
2821 if (res->start == 0 || resource_size(res) == 0) {
Jordan Crouse7501d452012-04-19 08:58:44 -06002822 KGSL_DRV_ERR(device, "dev %d invalid register region\n",
2823 device->id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002824 status = -EINVAL;
2825 goto error_pwrctrl_close;
2826 }
2827
Jordan Crouse7501d452012-04-19 08:58:44 -06002828 device->reg_phys = res->start;
2829 device->reg_len = resource_size(res);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002830
Jeremy Gebben4204d0f2012-03-01 16:06:21 -07002831 if (!devm_request_mem_region(device->dev, device->reg_phys,
2832 device->reg_len, device->name)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002833 KGSL_DRV_ERR(device, "request_mem_region failed\n");
2834 status = -ENODEV;
2835 goto error_pwrctrl_close;
2836 }
2837
Jeremy Gebben4204d0f2012-03-01 16:06:21 -07002838 device->reg_virt = devm_ioremap(device->dev, device->reg_phys,
2839 device->reg_len);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002840
Jordan Crouse7501d452012-04-19 08:58:44 -06002841 if (device->reg_virt == NULL) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002842 KGSL_DRV_ERR(device, "ioremap failed\n");
2843 status = -ENODEV;
Jeremy Gebben4204d0f2012-03-01 16:06:21 -07002844 goto error_pwrctrl_close;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002845 }
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002846 /*acquire interrupt */
2847 device->pwrctrl.interrupt_num =
2848 platform_get_irq_byname(pdev, device->pwrctrl.irq_name);
2849
2850 if (device->pwrctrl.interrupt_num <= 0) {
2851 KGSL_DRV_ERR(device, "platform_get_irq_byname failed: %d\n",
2852 device->pwrctrl.interrupt_num);
2853 status = -EINVAL;
Jeremy Gebben4204d0f2012-03-01 16:06:21 -07002854 goto error_pwrctrl_close;
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002855 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002856
Jeremy Gebben4204d0f2012-03-01 16:06:21 -07002857 status = devm_request_irq(device->dev, device->pwrctrl.interrupt_num,
2858 kgsl_irq_handler, IRQF_TRIGGER_HIGH,
2859 device->name, device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002860 if (status) {
2861 KGSL_DRV_ERR(device, "request_irq(%d) failed: %d\n",
2862 device->pwrctrl.interrupt_num, status);
Jeremy Gebben4204d0f2012-03-01 16:06:21 -07002863 goto error_pwrctrl_close;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002864 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002865 disable_irq(device->pwrctrl.interrupt_num);
2866
2867 KGSL_DRV_INFO(device,
Jordan Crouse7501d452012-04-19 08:58:44 -06002868 "dev_id %d regs phys 0x%08lx size 0x%08x virt %p\n",
2869 device->id, device->reg_phys, device->reg_len,
2870 device->reg_virt);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002871
Michael Street8bacdd02012-01-05 14:55:01 -08002872 result = kgsl_drm_init(pdev);
2873 if (result)
Jeremy Gebben4204d0f2012-03-01 16:06:21 -07002874 goto error_pwrctrl_close;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002875
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002876 kgsl_cffdump_open(device->id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002877
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002878 setup_timer(&device->idle_timer, kgsl_timer, (unsigned long) device);
2879 status = kgsl_create_device_workqueue(device);
2880 if (status)
Jeremy Gebben4204d0f2012-03-01 16:06:21 -07002881 goto error_pwrctrl_close;
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002882
2883 status = kgsl_mmu_init(device);
2884 if (status != 0) {
2885 KGSL_DRV_ERR(device, "kgsl_mmu_init failed %d\n", status);
2886 goto error_dest_work_q;
2887 }
2888
2889 status = kgsl_allocate_contiguous(&device->memstore,
Richard Ruigrok2ad5e9d2012-06-14 14:22:05 -07002890 KGSL_MEMSTORE_SIZE);
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002891
2892 if (status != 0) {
2893 KGSL_DRV_ERR(device, "kgsl_allocate_contiguous failed %d\n",
2894 status);
2895 goto error_close_mmu;
2896 }
2897
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002898 pm_qos_add_request(&device->pm_qos_req_dma, PM_QOS_CPU_DMA_LATENCY,
2899 PM_QOS_DEFAULT_VALUE);
2900
 2901	/* Initialize the snapshot engine */
2902 kgsl_device_snapshot_init(device);
2903
2904 /* Initialize common sysfs entries */
2905 kgsl_pwrctrl_init_sysfs(device);
2906
2907 return 0;
2908
2909error_close_mmu:
2910 kgsl_mmu_close(device);
2911error_dest_work_q:
2912 destroy_workqueue(device->work_queue);
2913 device->work_queue = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002914error_pwrctrl_close:
2915 kgsl_pwrctrl_close(device);
2916error:
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002917 _unregister_device(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002918 return status;
2919}
2920EXPORT_SYMBOL(kgsl_device_platform_probe);
2921
int kgsl_postmortem_dump(struct kgsl_device *device, int manual)
{
	bool saved_nap;
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	BUG_ON(device == NULL);

	kgsl_cffdump_hang(device->id);

	/* For a manual dump, make sure that the system is idle */

	if (manual) {
		if (device->active_cnt != 0) {
			mutex_unlock(&device->mutex);
			wait_for_completion(&device->suspend_gate);
			mutex_lock(&device->mutex);
		}

		if (device->state == KGSL_STATE_ACTIVE)
			kgsl_idle(device);
	}

	if (device->pm_dump_enable) {
		KGSL_LOG_DUMP(device,
			"POWER: FLAGS = %08lX | ACTIVE POWERLEVEL = %08X",
			pwr->power_flags, pwr->active_pwrlevel);

		KGSL_LOG_DUMP(device, "POWER: INTERVAL TIMEOUT = %08X ",
			pwr->interval_timeout);
	}

	/* Disable the idle timer so we don't get interrupted */
	del_timer_sync(&device->idle_timer);
	mutex_unlock(&device->mutex);
	flush_workqueue(device->work_queue);
	mutex_lock(&device->mutex);

	/* Turn off napping to make sure we have the clocks' full
	   attention through the following process */
	saved_nap = device->pwrctrl.nap_allowed;
	device->pwrctrl.nap_allowed = false;

	/* Force on the clocks */
	kgsl_pwrctrl_wake(device);

	/* Disable the irq */
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);

	/* Call the device specific postmortem dump function */
	device->ftbl->postmortem_dump(device, manual);

	/* Restore nap mode */
	device->pwrctrl.nap_allowed = saved_nap;

	/* On a manual trigger, turn on the interrupts and put
	   the clocks to sleep.  They will recover themselves
	   on the next event.  For a hang, leave things as they
	   are until fault tolerance kicks in. */

	if (manual) {
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);

		/* try to go into a sleep mode until the next event */
		kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP);
		kgsl_pwrctrl_sleep(device);
	}

	return 0;
}
EXPORT_SYMBOL(kgsl_postmortem_dump);

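/*
 * kgsl_postmortem_dump() drops device->mutex around flush_workqueue(),
 * presumably because queued work items may themselves need that mutex;
 * flushing with the lock held could then deadlock.  The compiled-out
 * sketch below shows just that drain pattern; the bar_* type and
 * fields are hypothetical stand-ins, not part of this driver.
 */
#if 0
static void bar_drain_work(struct bar_device *bar)
{
	/* stop the timer that schedules new work */
	del_timer_sync(&bar->idle_timer);

	/* let in-flight work items take the mutex and finish */
	mutex_unlock(&bar->mutex);
	flush_workqueue(bar->work_queue);
	mutex_lock(&bar->mutex);
}
#endif
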
void kgsl_device_platform_remove(struct kgsl_device *device)
{
	kgsl_device_snapshot_close(device);

	kgsl_cffdump_close(device->id);
	kgsl_pwrctrl_uninit_sysfs(device);

	pm_qos_remove_request(&device->pm_qos_req_dma);

	idr_destroy(&device->context_idr);

	kgsl_sharedmem_free(&device->memstore);

	kgsl_mmu_close(device);

	if (device->work_queue) {
		destroy_workqueue(device->work_queue);
		device->work_queue = NULL;
	}
	kgsl_pwrctrl_close(device);

	_unregister_device(device);
}
EXPORT_SYMBOL(kgsl_device_platform_remove);

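/*
 * kgsl_device_platform_remove() releases resources in roughly the
 * reverse of the order kgsl_device_platform_probe() acquired them, the
 * same mirror-image relationship shown by the hypothetical foo_*
 * sketch that follows the probe function above:
 */
#if 0
static void foo_remove_sketch(struct foo_device *foo)
{
	foo_free_memstore(foo);		/* undo step 3 */
	foo_mmu_close(foo);		/* undo step 2 */
	foo_irq_teardown(foo);		/* undo step 1 */
}
#endif
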
static int __devinit
kgsl_ptdata_init(void)
{
	kgsl_driver.ptpool = kgsl_mmu_ptpool_init(kgsl_pagetable_count);

	if (!kgsl_driver.ptpool)
		return -ENOMEM;
	return 0;
}

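/*
 * kgsl_ptdata_init() pre-allocates a pool of kgsl_pagetable_count
 * pagetables up front so they need not be created from scratch later.
 * A generic, compiled-out sketch of the same size-from-tunable pool
 * pattern follows; pool_create() and the pool_* names are hypothetical:
 */
#if 0
static int pool_depth = 16;	/* tunable, e.g. via module_param */

static void *pool;

static int __init pool_init(void)
{
	pool = pool_create(pool_depth);	/* hypothetical allocator */
	return pool ? 0 : -ENOMEM;
}
#endif
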
static void kgsl_core_exit(void)
{
	kgsl_mmu_ptpool_destroy(kgsl_driver.ptpool);
	kgsl_driver.ptpool = NULL;

	kgsl_drm_exit();
	kgsl_cffdump_destroy();
	kgsl_core_debugfs_close();

	/*
	 * We call kgsl_sharedmem_uninit_sysfs() and device_unregister()
	 * only if kgsl_driver.virtdev has been populated.
	 * We check at least one member of kgsl_driver.virtdev to
	 * see if it is not NULL (and thus, has been populated).
	 */
	if (kgsl_driver.virtdev.class) {
		kgsl_sharedmem_uninit_sysfs();
		device_unregister(&kgsl_driver.virtdev);
	}

	if (kgsl_driver.class) {
		class_destroy(kgsl_driver.class);
		kgsl_driver.class = NULL;
	}

	unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX);
}

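/*
 * kgsl_core_init() below jumps to kgsl_core_exit() on any failure, so
 * the exit path must tolerate partially completed initialization: each
 * unregistration is guarded by state that only becomes non-NULL once
 * the matching init step succeeded.  A compiled-out sketch of that
 * guard pattern (the baz_class variable is a hypothetical stand-in):
 */
#if 0
static struct class *baz_class;

static void baz_exit(void)
{
	if (baz_class) {
		class_destroy(baz_class);
		baz_class = NULL;	/* make a repeat call harmless */
	}
}
#endif
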
static int __init kgsl_core_init(void)
{
	int result = 0;

	/* alloc major and minor device numbers */
	result = alloc_chrdev_region(&kgsl_driver.major, 0, KGSL_DEVICE_MAX,
				     KGSL_NAME);
	if (result < 0) {
		KGSL_CORE_ERR("alloc_chrdev_region failed err = %d\n", result);
		goto err;
	}

	cdev_init(&kgsl_driver.cdev, &kgsl_fops);
	kgsl_driver.cdev.owner = THIS_MODULE;
	kgsl_driver.cdev.ops = &kgsl_fops;
	result = cdev_add(&kgsl_driver.cdev, MKDEV(MAJOR(kgsl_driver.major), 0),
			  KGSL_DEVICE_MAX);

	if (result) {
		KGSL_CORE_ERR("kgsl: cdev_add() failed, dev_num= %d,"
			      " result= %d\n", kgsl_driver.major, result);
		goto err;
	}

	kgsl_driver.class = class_create(THIS_MODULE, KGSL_NAME);

	if (IS_ERR(kgsl_driver.class)) {
		result = PTR_ERR(kgsl_driver.class);
		KGSL_CORE_ERR("failed to create class %s\n", KGSL_NAME);
		goto err;
	}

	/* Make a virtual device for managing core related things
	   in sysfs */
	kgsl_driver.virtdev.class = kgsl_driver.class;
	dev_set_name(&kgsl_driver.virtdev, "kgsl");
	result = device_register(&kgsl_driver.virtdev);
	if (result) {
		KGSL_CORE_ERR("device_register failed\n");
		goto err;
	}

	/* Make kobjects in the virtual device for storing statistics */

	kgsl_driver.ptkobj =
		kobject_create_and_add("pagetables",
				       &kgsl_driver.virtdev.kobj);

	kgsl_driver.prockobj =
		kobject_create_and_add("proc",
				       &kgsl_driver.virtdev.kobj);

	kgsl_core_debugfs_init();

	kgsl_sharedmem_init_sysfs();
	kgsl_cffdump_init();

	INIT_LIST_HEAD(&kgsl_driver.process_list);

	INIT_LIST_HEAD(&kgsl_driver.pagetable_list);

	kgsl_mmu_set_mmutype(ksgl_mmu_type);

	if (KGSL_MMU_TYPE_GPU == kgsl_mmu_get_mmutype()) {
		result = kgsl_ptdata_init();
		if (result)
			goto err;
	}

	return 0;

err:
	kgsl_core_exit();
	return result;
}

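/*
 * kgsl_core_init() follows the standard character-device bring-up
 * sequence: reserve a dev_t range, bind file_operations through a
 * cdev, then create a class so udev can populate /dev.  The
 * compiled-out module below is a minimal, self-contained version of
 * that sequence; all demo_* names are hypothetical and unrelated to
 * kgsl.
 */
#if 0
static dev_t demo_devt;
static struct cdev demo_cdev;
static struct class *demo_class;

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
};

static int __init demo_init(void)
{
	int ret = alloc_chrdev_region(&demo_devt, 0, 1, "demo");
	if (ret < 0)
		return ret;

	cdev_init(&demo_cdev, &demo_fops);
	demo_cdev.owner = THIS_MODULE;
	ret = cdev_add(&demo_cdev, demo_devt, 1);
	if (ret)
		goto err_region;

	demo_class = class_create(THIS_MODULE, "demo");
	if (IS_ERR(demo_class)) {
		ret = PTR_ERR(demo_class);
		goto err_cdev;
	}

	/* /dev/demo0 shows up once udev processes the add uevent */
	device_create(demo_class, NULL, demo_devt, NULL, "demo0");
	return 0;

err_cdev:
	cdev_del(&demo_cdev);
err_region:
	unregister_chrdev_region(demo_devt, 1);
	return ret;
}

static void __exit demo_exit(void)
{
	/* teardown mirrors init in reverse order */
	device_destroy(demo_class, demo_devt);
	class_destroy(demo_class);
	cdev_del(&demo_cdev);
	unregister_chrdev_region(demo_devt, 1);
}
#endif
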
module_init(kgsl_core_init);
module_exit(kgsl_core_exit);

MODULE_AUTHOR("Qualcomm Innovation Center, Inc.");
MODULE_DESCRIPTION("MSM GPU driver");
MODULE_LICENSE("GPL");