/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/fb.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/android_pmem.h>
#include <linux/vmalloc.h>
#include <linux/pm_runtime.h>
#include <linux/genlock.h>
#include <linux/rbtree.h>
#include <linux/ashmem.h>
#include <linux/major.h>
#include <linux/ion.h>
#include <linux/io.h>
#include <mach/socinfo.h>

#include "kgsl.h"
#include "kgsl_debugfs.h"
#include "kgsl_cffdump.h"
#include "kgsl_log.h"
#include "kgsl_sharedmem.h"
#include "kgsl_device.h"
#include "kgsl_trace.h"

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "kgsl."

static int kgsl_pagetable_count = KGSL_PAGETABLE_COUNT;
static char *ksgl_mmu_type;
module_param_named(ptcount, kgsl_pagetable_count, int, 0);
MODULE_PARM_DESC(kgsl_pagetable_count,
"Minimum number of pagetables for KGSL to allocate at initialization time");
module_param_named(mmutype, ksgl_mmu_type, charp, 0);
MODULE_PARM_DESC(ksgl_mmu_type,
"Type of MMU to be used for graphics. Valid values are 'iommu' or 'gpummu' or 'nommu'");

static struct ion_client *kgsl_ion_client;

/**
 * kgsl_add_event - Add a new timestamp event for the KGSL device
 * @device - KGSL device for the new event
 * @id - context id the timestamp belongs to, or KGSL_MEMSTORE_GLOBAL
 * @ts - the timestamp to trigger the event on
 * @cb - callback function to call when the timestamp expires
 * @priv - private data for the specific event type
 * @owner - driver instance that owns this event
 *
 * @returns - 0 on success or error code on failure
 */

static int kgsl_add_event(struct kgsl_device *device, u32 id, u32 ts,
	void (*cb)(struct kgsl_device *, void *, u32, u32), void *priv,
	struct kgsl_device_private *owner)
{
	struct kgsl_event *event;
	struct list_head *n;
	unsigned int cur_ts;
	struct kgsl_context *context = NULL;

	if (cb == NULL)
		return -EINVAL;

	if (id != KGSL_MEMSTORE_GLOBAL) {
		context = idr_find(&device->context_idr, id);
		if (context == NULL)
			return -EINVAL;
	}
	cur_ts = kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED);

	/* Check to see if the requested timestamp has already fired */

	if (timestamp_cmp(cur_ts, ts) >= 0) {
		cb(device, priv, id, cur_ts);
		return 0;
	}

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (event == NULL)
		return -ENOMEM;

	event->context = context;
	event->timestamp = ts;
	event->priv = priv;
	event->func = cb;
	event->owner = owner;

	/*
	 * Add the event in order to the list. Order is by context id
	 * first and then by timestamp for that context.
	 */

	for (n = device->events.next; n != &device->events; n = n->next) {
		struct kgsl_event *e =
			list_entry(n, struct kgsl_event, list);

		if (e->context != context)
			continue;

		if (timestamp_cmp(e->timestamp, ts) > 0) {
			list_add(&event->list, n->prev);
			break;
		}
	}

	if (n == &device->events)
		list_add_tail(&event->list, &device->events);

	queue_work(device->work_queue, &device->ts_expired_ws);
	return 0;
}

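/*
 * Illustrative sketch (not part of the original driver): a caller that wants
 * to be told when a timestamp retires on a given context registers an event
 * roughly as below. The callback either fires immediately, if the timestamp
 * has already retired, or later from kgsl_timestamp_expired(). The names
 * my_event_cb and my_data are hypothetical.
 *
 *	static void my_event_cb(struct kgsl_device *device, void *priv,
 *				u32 id, u32 timestamp)
 *	{
 *		struct my_data *data = priv;
 *		... release or free whatever was tied to this timestamp ...
 *	}
 *
 *	ret = kgsl_add_event(device, context_id, timestamp,
 *			     my_event_cb, data, dev_priv);
 *
 * A nonzero return means the callback was NULL, the context id was invalid,
 * or the event could not be allocated.
 */
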
/**
 * kgsl_cancel_events_ctxt - Cancel all events for a context
 * @device - KGSL device for the events to cancel
 * @context - context whose events we want to cancel
 *
 */
static void kgsl_cancel_events_ctxt(struct kgsl_device *device,
	struct kgsl_context *context)
{
	struct kgsl_event *event, *event_tmp;
	unsigned int id, cur;

	cur = kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED);
	id = context->id;

	list_for_each_entry_safe(event, event_tmp, &device->events, list) {
		if (event->context != context)
			continue;

		/*
		 * "cancel" the events by calling their callback.
		 * Currently, events are used for lock and memory
		 * management, so if the process is dying the right
		 * thing to do is release or free.
		 */
		if (event->func)
			event->func(device, event->priv, id, cur);

		list_del(&event->list);
		kfree(event);
	}
}

/**
 * kgsl_cancel_events - Cancel all events for a process
 * @device - KGSL device for the events to cancel
 * @owner - driver instance that owns the events to cancel
 *
 */
static void kgsl_cancel_events(struct kgsl_device *device,
	struct kgsl_device_private *owner)
{
	struct kgsl_event *event, *event_tmp;
	unsigned int id, cur;

	list_for_each_entry_safe(event, event_tmp, &device->events, list) {
		if (event->owner != owner)
			continue;

		cur = kgsl_readtimestamp(device, event->context,
			KGSL_TIMESTAMP_RETIRED);

		id = event->context ? event->context->id : KGSL_MEMSTORE_GLOBAL;
		/*
		 * "cancel" the events by calling their callback.
		 * Currently, events are used for lock and memory
		 * management, so if the process is dying the right
		 * thing to do is release or free.
		 */
		if (event->func)
			event->func(device, event->priv, id, cur);

		list_del(&event->list);
		kfree(event);
	}
}

/* kgsl_get_mem_entry - get the mem_entry structure for the specified object
 * @ptbase - the pagetable base of the object
 * @gpuaddr - the GPU address of the object
 * @size - Size of the region to search
 */

struct kgsl_mem_entry *kgsl_get_mem_entry(unsigned int ptbase,
	unsigned int gpuaddr, unsigned int size)
{
	struct kgsl_process_private *priv;
	struct kgsl_mem_entry *entry;

	mutex_lock(&kgsl_driver.process_mutex);

	list_for_each_entry(priv, &kgsl_driver.process_list, list) {
		if (!kgsl_mmu_pt_equal(priv->pagetable, ptbase))
			continue;
		spin_lock(&priv->mem_lock);
		entry = kgsl_sharedmem_find_region(priv, gpuaddr, size);

		if (entry) {
			spin_unlock(&priv->mem_lock);
			mutex_unlock(&kgsl_driver.process_mutex);
			return entry;
		}
		spin_unlock(&priv->mem_lock);
	}
	mutex_unlock(&kgsl_driver.process_mutex);

	return NULL;
}
EXPORT_SYMBOL(kgsl_get_mem_entry);

static inline struct kgsl_mem_entry *
kgsl_mem_entry_create(void)
{
	struct kgsl_mem_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	if (!entry)
		KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*entry));
	else
		kref_init(&entry->refcount);

	return entry;
}

void
kgsl_mem_entry_destroy(struct kref *kref)
{
	struct kgsl_mem_entry *entry = container_of(kref,
						struct kgsl_mem_entry,
						refcount);

	if (entry->memtype != KGSL_MEM_ENTRY_KERNEL)
		kgsl_driver.stats.mapped -= entry->memdesc.size;

	/*
	 * Ion takes care of freeing the sglist for us (how nice </sarcasm>) so
	 * unmap the dma before freeing the sharedmem so kgsl_sharedmem_free
	 * doesn't try to free it again
	 */

	if (entry->memtype == KGSL_MEM_ENTRY_ION) {
		ion_unmap_dma(kgsl_ion_client, entry->priv_data);
		entry->memdesc.sg = NULL;
	}

	kgsl_sharedmem_free(&entry->memdesc);

	switch (entry->memtype) {
	case KGSL_MEM_ENTRY_PMEM:
	case KGSL_MEM_ENTRY_ASHMEM:
		if (entry->priv_data)
			fput(entry->priv_data);
		break;
	case KGSL_MEM_ENTRY_ION:
		ion_free(kgsl_ion_client, entry->priv_data);
		break;
	}

	kfree(entry);
}
EXPORT_SYMBOL(kgsl_mem_entry_destroy);

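/*
 * Lifecycle note (sketch, based on the code above): a kgsl_mem_entry starts
 * with a single reference taken by kref_init() in kgsl_mem_entry_create().
 * Dropping the last reference, e.g. via kgsl_mem_entry_put() as done in
 * kgsl_mem_entry_detach_process() below, runs kgsl_mem_entry_destroy()
 * through the kref machinery, which frees the backing memory and releases
 * the pmem/ashmem/ion handle.
 *
 *	entry = kgsl_mem_entry_create();	refcount == 1
 *	...
 *	kgsl_mem_entry_put(entry);		refcount -> 0, destroy runs
 */
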
static
void kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry,
				   struct kgsl_process_private *process)
{
	struct rb_node **node;
	struct rb_node *parent = NULL;

	spin_lock(&process->mem_lock);

	node = &process->mem_rb.rb_node;

	while (*node) {
		struct kgsl_mem_entry *cur;

		parent = *node;
		cur = rb_entry(parent, struct kgsl_mem_entry, node);

		if (entry->memdesc.gpuaddr < cur->memdesc.gpuaddr)
			node = &parent->rb_left;
		else
			node = &parent->rb_right;
	}

	rb_link_node(&entry->node, parent, node);
	rb_insert_color(&entry->node, &process->mem_rb);

	spin_unlock(&process->mem_lock);

	entry->priv = process;
}

/* Detach a memory entry from a process and unmap it from the MMU */

static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry)
{
	if (entry == NULL)
		return;

	entry->priv->stats[entry->memtype].cur -= entry->memdesc.size;
	entry->priv = NULL;

	kgsl_mmu_unmap(entry->memdesc.pagetable, &entry->memdesc);

	kgsl_mem_entry_put(entry);
}

/* Allocate a new context id */

static struct kgsl_context *
kgsl_create_context(struct kgsl_device_private *dev_priv)
{
	struct kgsl_context *context;
	int ret, id;

	context = kzalloc(sizeof(*context), GFP_KERNEL);

	if (context == NULL)
		return NULL;

	while (1) {
		if (idr_pre_get(&dev_priv->device->context_idr,
				GFP_KERNEL) == 0) {
			kfree(context);
			return NULL;
		}

		ret = idr_get_new_above(&dev_priv->device->context_idr,
				context, 1, &id);

		if (ret != -EAGAIN)
			break;
	}

	if (ret) {
		kfree(context);
		return NULL;
	}

	/* MAX - 1, there is one memdesc in memstore for device info */
	if (id >= KGSL_MEMSTORE_MAX) {
		KGSL_DRV_ERR(dev_priv->device, "cannot have more than %d "
			"ctxts due to memstore limitation\n",
			KGSL_MEMSTORE_MAX);
		idr_remove(&dev_priv->device->context_idr, id);
		kfree(context);
		return NULL;
	}

	kref_init(&context->refcount);
	context->id = id;
	context->dev_priv = dev_priv;

	return context;
}

/**
 * kgsl_context_detach - Release the "master" context reference
 * @context - The context that will be detached
 *
 * This is called when a context becomes unusable, because userspace
 * has requested for it to be destroyed. The context itself may
 * exist a bit longer until its reference count goes to zero.
 * Other code referencing the context can detect that it has been
 * detached because the context id will be set to KGSL_CONTEXT_INVALID.
 */
void
kgsl_context_detach(struct kgsl_context *context)
{
	int id;
	struct kgsl_device *device;
	if (context == NULL)
		return;
	device = context->dev_priv->device;
	trace_kgsl_context_detach(device, context);
	id = context->id;

	if (device->ftbl->drawctxt_destroy)
		device->ftbl->drawctxt_destroy(device, context);
	/* device specific drawctxt_destroy MUST clean up devctxt */
	BUG_ON(context->devctxt);
	/*
	 * Cancel events after the device-specific context is
	 * destroyed, to avoid possibly freeing memory while
	 * it is still in use by the GPU.
	 */
	kgsl_cancel_events_ctxt(device, context);
	idr_remove(&device->context_idr, id);
	context->id = KGSL_CONTEXT_INVALID;
	kgsl_context_put(context);
}

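/*
 * Usage sketch (assumption, mirroring kgsl_ioctl_device_waittimestamp_ctxtid
 * later in this file): code that may block with the device mutex dropped
 * should hold its own reference so the context cannot be freed underneath
 * it, then release that reference when done:
 *
 *	kgsl_context_get(context);
 *	result = _device_waittimestamp(dev_priv, context, timestamp, timeout);
 *	kgsl_context_put(context);
 *
 * The "master" reference taken in kgsl_create_context() is dropped by
 * kgsl_context_detach(); kgsl_context_destroy() runs once the last
 * reference is gone.
 */
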
void
kgsl_context_destroy(struct kref *kref)
{
	struct kgsl_context *context = container_of(kref, struct kgsl_context,
						refcount);
	kfree(context);
}

void kgsl_timestamp_expired(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
		ts_expired_ws);
	struct kgsl_event *event, *event_tmp;
	uint32_t ts_processed;
	unsigned int id;

	mutex_lock(&device->mutex);

	/* Process expired events */
	list_for_each_entry_safe(event, event_tmp, &device->events, list) {
		ts_processed = kgsl_readtimestamp(device, event->context,
			KGSL_TIMESTAMP_RETIRED);
		if (timestamp_cmp(ts_processed, event->timestamp) < 0)
			continue;

		id = event->context ? event->context->id : KGSL_MEMSTORE_GLOBAL;

		if (event->func)
			event->func(device, event->priv, id, ts_processed);

		list_del(&event->list);
		kfree(event);
	}

	device->last_expired_ctxt_id = KGSL_CONTEXT_INVALID;

	mutex_unlock(&device->mutex);
}
EXPORT_SYMBOL(kgsl_timestamp_expired);

static void kgsl_check_idle_locked(struct kgsl_device *device)
{
	if (device->pwrctrl.nap_allowed == true &&
	    device->state == KGSL_STATE_ACTIVE &&
	    device->requested_state == KGSL_STATE_NONE) {
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
		if (kgsl_pwrctrl_sleep(device) != 0)
			mod_timer(&device->idle_timer,
				jiffies +
				device->pwrctrl.interval_timeout);
	}
}

static void kgsl_check_idle(struct kgsl_device *device)
{
	mutex_lock(&device->mutex);
	kgsl_check_idle_locked(device);
	mutex_unlock(&device->mutex);
}

struct kgsl_device *kgsl_get_device(int dev_idx)
{
	int i;
	struct kgsl_device *ret = NULL;

	mutex_lock(&kgsl_driver.devlock);

	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		if (kgsl_driver.devp[i] && kgsl_driver.devp[i]->id == dev_idx) {
			ret = kgsl_driver.devp[i];
			break;
		}
	}

	mutex_unlock(&kgsl_driver.devlock);
	return ret;
}
EXPORT_SYMBOL(kgsl_get_device);

static struct kgsl_device *kgsl_get_minor(int minor)
{
	struct kgsl_device *ret = NULL;

	if (minor < 0 || minor >= KGSL_DEVICE_MAX)
		return NULL;

	mutex_lock(&kgsl_driver.devlock);
	ret = kgsl_driver.devp[minor];
	mutex_unlock(&kgsl_driver.devlock);

	return ret;
}

int kgsl_register_ts_notifier(struct kgsl_device *device,
			      struct notifier_block *nb)
{
	BUG_ON(device == NULL);
	return atomic_notifier_chain_register(&device->ts_notifier_list,
					      nb);
}
EXPORT_SYMBOL(kgsl_register_ts_notifier);

int kgsl_unregister_ts_notifier(struct kgsl_device *device,
				struct notifier_block *nb)
{
	BUG_ON(device == NULL);
	return atomic_notifier_chain_unregister(&device->ts_notifier_list,
						nb);
}
EXPORT_SYMBOL(kgsl_unregister_ts_notifier);

int kgsl_check_timestamp(struct kgsl_device *device,
	struct kgsl_context *context, unsigned int timestamp)
{
	unsigned int ts_processed;

	ts_processed = kgsl_readtimestamp(device, context,
		KGSL_TIMESTAMP_RETIRED);

	return (timestamp_cmp(ts_processed, timestamp) >= 0);
}
EXPORT_SYMBOL(kgsl_check_timestamp);

static int kgsl_suspend_device(struct kgsl_device *device, pm_message_t state)
{
	int status = -EINVAL;
	unsigned int nap_allowed_saved;
	struct kgsl_pwrscale_policy *policy_saved;

	if (!device)
		return -EINVAL;

	KGSL_PWR_WARN(device, "suspend start\n");

	mutex_lock(&device->mutex);
	nap_allowed_saved = device->pwrctrl.nap_allowed;
	device->pwrctrl.nap_allowed = false;
	policy_saved = device->pwrscale.policy;
	device->pwrscale.policy = NULL;
	kgsl_pwrctrl_request_state(device, KGSL_STATE_SUSPEND);
	/* Make sure no user process is waiting for a timestamp *
	 * before suspending */
	if (device->active_cnt != 0) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->suspend_gate);
		mutex_lock(&device->mutex);
	}
	/* Don't let the timer wake us during suspended sleep. */
	del_timer_sync(&device->idle_timer);
	switch (device->state) {
	case KGSL_STATE_INIT:
		break;
	case KGSL_STATE_ACTIVE:
		/* Wait for the device to become idle */
		device->ftbl->idle(device, KGSL_TIMEOUT_DEFAULT);
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
		/* Get the completion ready to be waited upon. */
		INIT_COMPLETION(device->hwaccess_gate);
		device->ftbl->suspend_context(device);
		device->ftbl->stop(device);
		pm_qos_update_request(&device->pm_qos_req_dma,
				PM_QOS_DEFAULT_VALUE);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SUSPEND);
		break;
	case KGSL_STATE_SLUMBER:
		INIT_COMPLETION(device->hwaccess_gate);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SUSPEND);
		break;
	default:
		KGSL_PWR_ERR(device, "suspend fail, device %d\n",
			device->id);
		goto end;
	}
	kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
	device->pwrctrl.nap_allowed = nap_allowed_saved;
	device->pwrscale.policy = policy_saved;
	status = 0;

end:
	mutex_unlock(&device->mutex);
	KGSL_PWR_WARN(device, "suspend end\n");
	return status;
}

static int kgsl_resume_device(struct kgsl_device *device)
{
	int status = -EINVAL;

	if (!device)
		return -EINVAL;

	KGSL_PWR_WARN(device, "resume start\n");
	mutex_lock(&device->mutex);
	if (device->state == KGSL_STATE_SUSPEND) {
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
		status = 0;
		complete_all(&device->hwaccess_gate);
	}
	kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);

	mutex_unlock(&device->mutex);
	KGSL_PWR_WARN(device, "resume end\n");
	return status;
}

static int kgsl_suspend(struct device *dev)
{

	pm_message_t arg = {0};
	struct kgsl_device *device = dev_get_drvdata(dev);
	return kgsl_suspend_device(device, arg);
}

static int kgsl_resume(struct device *dev)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	return kgsl_resume_device(device);
}

static int kgsl_runtime_suspend(struct device *dev)
{
	return 0;
}

static int kgsl_runtime_resume(struct device *dev)
{
	return 0;
}

const struct dev_pm_ops kgsl_pm_ops = {
	.suspend = kgsl_suspend,
	.resume = kgsl_resume,
	.runtime_suspend = kgsl_runtime_suspend,
	.runtime_resume = kgsl_runtime_resume,
};
EXPORT_SYMBOL(kgsl_pm_ops);

void kgsl_early_suspend_driver(struct early_suspend *h)
{
	struct kgsl_device *device = container_of(h,
					struct kgsl_device, display_off);
	KGSL_PWR_WARN(device, "early suspend start\n");
	mutex_lock(&device->mutex);
	kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
	kgsl_pwrctrl_sleep(device);
	mutex_unlock(&device->mutex);
	KGSL_PWR_WARN(device, "early suspend end\n");
}
EXPORT_SYMBOL(kgsl_early_suspend_driver);

int kgsl_suspend_driver(struct platform_device *pdev,
			pm_message_t state)
{
	struct kgsl_device *device = dev_get_drvdata(&pdev->dev);
	return kgsl_suspend_device(device, state);
}
EXPORT_SYMBOL(kgsl_suspend_driver);

int kgsl_resume_driver(struct platform_device *pdev)
{
	struct kgsl_device *device = dev_get_drvdata(&pdev->dev);
	return kgsl_resume_device(device);
}
EXPORT_SYMBOL(kgsl_resume_driver);

void kgsl_late_resume_driver(struct early_suspend *h)
{
	struct kgsl_device *device = container_of(h,
					struct kgsl_device, display_off);
	KGSL_PWR_WARN(device, "late resume start\n");
	mutex_lock(&device->mutex);
	device->pwrctrl.restore_slumber = 0;
	if (device->pwrscale.policy == NULL)
		kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_TURBO);
	kgsl_pwrctrl_wake(device);
	mutex_unlock(&device->mutex);
	kgsl_check_idle(device);
	KGSL_PWR_WARN(device, "late resume end\n");
}
EXPORT_SYMBOL(kgsl_late_resume_driver);

/* file operations */
static struct kgsl_process_private *
kgsl_get_process_private(struct kgsl_device_private *cur_dev_priv)
{
	struct kgsl_process_private *private;

	mutex_lock(&kgsl_driver.process_mutex);
	list_for_each_entry(private, &kgsl_driver.process_list, list) {
		if (private->pid == task_tgid_nr(current)) {
			private->refcnt++;
			goto out;
		}
	}

	/* no existing process private found for this dev_priv, create one */
	private = kzalloc(sizeof(struct kgsl_process_private), GFP_KERNEL);
	if (private == NULL) {
		KGSL_DRV_ERR(cur_dev_priv->device, "kzalloc(%d) failed\n",
			sizeof(struct kgsl_process_private));
		goto out;
	}

	spin_lock_init(&private->mem_lock);
	private->refcnt = 1;
	private->pid = task_tgid_nr(current);
	private->mem_rb = RB_ROOT;

	if (kgsl_mmu_enabled()) {
		unsigned long pt_name;

		pt_name = task_tgid_nr(current);
		private->pagetable = kgsl_mmu_getpagetable(pt_name);
		if (private->pagetable == NULL) {
			kfree(private);
			private = NULL;
			goto out;
		}
	}

	list_add(&private->list, &kgsl_driver.process_list);

	kgsl_process_init_sysfs(private);

out:
	mutex_unlock(&kgsl_driver.process_mutex);
	return private;
}

static void
kgsl_put_process_private(struct kgsl_device *device,
			 struct kgsl_process_private *private)
{
	struct kgsl_mem_entry *entry = NULL;
	struct rb_node *node;

	if (!private)
		return;

	mutex_lock(&kgsl_driver.process_mutex);

	if (--private->refcnt)
		goto unlock;

	kgsl_process_uninit_sysfs(private);

	list_del(&private->list);

	for (node = rb_first(&private->mem_rb); node; ) {
		entry = rb_entry(node, struct kgsl_mem_entry, node);
		node = rb_next(&entry->node);

		rb_erase(&entry->node, &private->mem_rb);
		kgsl_mem_entry_detach_process(entry);
	}
	kgsl_mmu_putpagetable(private->pagetable);
	kfree(private);
unlock:
	mutex_unlock(&kgsl_driver.process_mutex);
}

static int kgsl_release(struct inode *inodep, struct file *filep)
{
	int result = 0;
	struct kgsl_device_private *dev_priv = filep->private_data;
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_device *device = dev_priv->device;
	struct kgsl_context *context;
	int next = 0;

	filep->private_data = NULL;

	mutex_lock(&device->mutex);
	kgsl_check_suspended(device);

	while (1) {
		context = idr_get_next(&device->context_idr, &next);
		if (context == NULL)
			break;

		if (context->dev_priv == dev_priv)
			kgsl_context_detach(context);

		next = next + 1;
	}
	/*
	 * Clean up any to-be-freed entries that belong to this
	 * process and this device. This is done after the contexts
	 * are destroyed to avoid possibly freeing memory while
	 * it is still in use by the GPU.
	 */
	kgsl_cancel_events(device, dev_priv);

	device->open_count--;
	if (device->open_count == 0) {
		result = device->ftbl->stop(device);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
	}

	mutex_unlock(&device->mutex);
	kfree(dev_priv);

	kgsl_put_process_private(device, private);

	pm_runtime_put(device->parentdev);
	return result;
}

static int kgsl_open(struct inode *inodep, struct file *filep)
{
	int result;
	struct kgsl_device_private *dev_priv;
	struct kgsl_device *device;
	unsigned int minor = iminor(inodep);

	device = kgsl_get_minor(minor);
	BUG_ON(device == NULL);

	if (filep->f_flags & O_EXCL) {
		KGSL_DRV_ERR(device, "O_EXCL not allowed\n");
		return -EBUSY;
	}

	result = pm_runtime_get_sync(device->parentdev);
	if (result < 0) {
		KGSL_DRV_ERR(device,
			"Runtime PM: Unable to wake up the device, rc = %d\n",
			result);
		return result;
	}
	result = 0;

	dev_priv = kzalloc(sizeof(struct kgsl_device_private), GFP_KERNEL);
	if (dev_priv == NULL) {
		KGSL_DRV_ERR(device, "kzalloc failed(%d)\n",
			sizeof(struct kgsl_device_private));
		result = -ENOMEM;
		goto err_pmruntime;
	}

	dev_priv->device = device;
	filep->private_data = dev_priv;

	/* Get file (per process) private struct */
	dev_priv->process_priv = kgsl_get_process_private(dev_priv);
	if (dev_priv->process_priv == NULL) {
		result = -ENOMEM;
		goto err_freedevpriv;
	}

	mutex_lock(&device->mutex);
	kgsl_check_suspended(device);

	if (device->open_count == 0) {
		kgsl_sharedmem_set(&device->memstore, 0, 0,
				device->memstore.size);

		result = device->ftbl->start(device, true);

		if (result) {
			mutex_unlock(&device->mutex);
			goto err_putprocess;
		}
		kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
	}
	device->open_count++;
	mutex_unlock(&device->mutex);

	KGSL_DRV_INFO(device, "Initialized %s: mmu=%s pagetable_count=%d\n",
		device->name, kgsl_mmu_enabled() ? "on" : "off",
		kgsl_pagetable_count);

	return result;

err_putprocess:
	kgsl_put_process_private(device, dev_priv->process_priv);
err_freedevpriv:
	filep->private_data = NULL;
	kfree(dev_priv);
err_pmruntime:
	pm_runtime_put(device->parentdev);
	return result;
}

/* call with private->mem_lock locked */
struct kgsl_mem_entry *
kgsl_sharedmem_find_region(struct kgsl_process_private *private,
	unsigned int gpuaddr, size_t size)
{
	struct rb_node *node = private->mem_rb.rb_node;

	while (node != NULL) {
		struct kgsl_mem_entry *entry;

		entry = rb_entry(node, struct kgsl_mem_entry, node);

		if (kgsl_gpuaddr_in_memdesc(&entry->memdesc, gpuaddr, size))
			return entry;

		if (gpuaddr < entry->memdesc.gpuaddr)
			node = node->rb_left;
		else if (gpuaddr >=
			 (entry->memdesc.gpuaddr + entry->memdesc.size))
			node = node->rb_right;
		else {
			return NULL;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(kgsl_sharedmem_find_region);

/* call with private->mem_lock locked */
static inline struct kgsl_mem_entry *
kgsl_sharedmem_find(struct kgsl_process_private *private, unsigned int gpuaddr)
{
	return kgsl_sharedmem_find_region(private, gpuaddr, 1);
}

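/*
 * Locking sketch (not new behaviour, just the pattern the callers below
 * follow): both lookup helpers walk the per-process red-black tree and must
 * therefore be called with private->mem_lock held, e.g.:
 *
 *	spin_lock(&private->mem_lock);
 *	entry = kgsl_sharedmem_find(private, gpuaddr);
 *	spin_unlock(&private->mem_lock);
 *
 * Callers that remove an entry also do the rb_erase() under the same lock,
 * as kgsl_ioctl_sharedmem_free() does.
 */
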
/* call all ioctl sub functions with driver locked */
static long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_device_getproperty *param = data;

	switch (param->type) {
	case KGSL_PROP_VERSION:
	{
		struct kgsl_version version;
		if (param->sizebytes != sizeof(version)) {
			result = -EINVAL;
			break;
		}

		version.drv_major = KGSL_VERSION_MAJOR;
		version.drv_minor = KGSL_VERSION_MINOR;
		version.dev_major = dev_priv->device->ver_major;
		version.dev_minor = dev_priv->device->ver_minor;

		if (copy_to_user(param->value, &version, sizeof(version)))
			result = -EFAULT;

		break;
	}
	case KGSL_PROP_GPU_RESET_STAT:
	{
		/* Return reset status of given context and clear it */
		uint32_t id;
		struct kgsl_context *context;

		if (param->sizebytes != sizeof(unsigned int)) {
			result = -EINVAL;
			break;
		}
		/* We expect the value passed in to contain the context id */
		if (copy_from_user(&id, param->value,
			sizeof(unsigned int))) {
			result = -EFAULT;
			break;
		}
		context = kgsl_find_context(dev_priv, id);
		if (!context) {
			result = -EINVAL;
			break;
		}
		/*
		 * Copy the reset status to value which also serves as
		 * the out parameter
		 */
		if (copy_to_user(param->value, &(context->reset_status),
			sizeof(unsigned int))) {
			result = -EFAULT;
			break;
		}
		/* Clear reset status once it's been queried */
		context->reset_status = KGSL_CTX_STAT_NO_ERROR;
		break;
	}
	default:
		result = dev_priv->device->ftbl->getproperty(
					dev_priv->device, param->type,
					param->value, param->sizebytes);
	}

	return result;
}

static long kgsl_ioctl_device_setproperty(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data)
{
	int result = 0;
	/* The getproperty struct is reused for setproperty too */
	struct kgsl_device_getproperty *param = data;

	if (dev_priv->device->ftbl->setproperty)
		result = dev_priv->device->ftbl->setproperty(
			dev_priv->device, param->type,
			param->value, param->sizebytes);

	return result;
}

static long _device_waittimestamp(struct kgsl_device_private *dev_priv,
				struct kgsl_context *context,
				unsigned int timestamp,
				unsigned int timeout)
{
	int result = 0;
	struct kgsl_device *device = dev_priv->device;
	unsigned int context_id = context ? context->id : KGSL_MEMSTORE_GLOBAL;

	/* Set the active count so that suspend doesn't do the wrong thing */

	device->active_cnt++;

	trace_kgsl_waittimestamp_entry(device, context_id,
				kgsl_readtimestamp(device, context,
					KGSL_TIMESTAMP_RETIRED),
				timestamp, timeout);

	result = device->ftbl->waittimestamp(dev_priv->device,
					context, timestamp, timeout);

	trace_kgsl_waittimestamp_exit(device,
				kgsl_readtimestamp(device, context,
					KGSL_TIMESTAMP_RETIRED),
				result);

	/* Fire off any pending suspend operations that are in flight */

	INIT_COMPLETION(dev_priv->device->suspend_gate);
	dev_priv->device->active_cnt--;
	complete(&dev_priv->device->suspend_gate);

	return result;
}

static long kgsl_ioctl_device_waittimestamp(struct kgsl_device_private
						*dev_priv, unsigned int cmd,
						void *data)
{
	struct kgsl_device_waittimestamp *param = data;

	return _device_waittimestamp(dev_priv, NULL,
			param->timestamp, param->timeout);
}

static long kgsl_ioctl_device_waittimestamp_ctxtid(struct kgsl_device_private
						*dev_priv, unsigned int cmd,
						void *data)
{
	struct kgsl_device_waittimestamp_ctxtid *param = data;
	struct kgsl_context *context;
	int result;

	context = kgsl_find_context(dev_priv, param->context_id);
	if (context == NULL) {
		KGSL_DRV_ERR(dev_priv->device, "invalid context_id %d\n",
			param->context_id);
		return -EINVAL;
	}
	/*
	 * A reference count is needed here, because waittimestamp may
	 * block with the device mutex unlocked and userspace could
	 * request for the context to be destroyed during that time.
	 */
	kgsl_context_get(context);
	result = _device_waittimestamp(dev_priv, context,
			param->timestamp, param->timeout);
	kgsl_context_put(context);
	return result;
}

static long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
				      unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_ringbuffer_issueibcmds *param = data;
	struct kgsl_ibdesc *ibdesc;
	struct kgsl_context *context;

	context = kgsl_find_context(dev_priv, param->drawctxt_id);
	if (context == NULL) {
		result = -EINVAL;
		KGSL_DRV_ERR(dev_priv->device,
			"invalid context_id %d\n",
			param->drawctxt_id);
		goto done;
	}

	if (param->flags & KGSL_CONTEXT_SUBMIT_IB_LIST) {
		KGSL_DRV_INFO(dev_priv->device,
			"Using IB list mode for ib submission, numibs: %d\n",
			param->numibs);
		if (!param->numibs) {
			KGSL_DRV_ERR(dev_priv->device,
				"Invalid numibs as parameter: %d\n",
				param->numibs);
			result = -EINVAL;
			goto done;
		}

		ibdesc = kzalloc(sizeof(struct kgsl_ibdesc) * param->numibs,
				GFP_KERNEL);
		if (!ibdesc) {
			KGSL_MEM_ERR(dev_priv->device,
				"kzalloc(%d) failed\n",
				sizeof(struct kgsl_ibdesc) * param->numibs);
			result = -ENOMEM;
			goto done;
		}

		if (copy_from_user(ibdesc, (void *)param->ibdesc_addr,
				sizeof(struct kgsl_ibdesc) * param->numibs)) {
			result = -EFAULT;
			KGSL_DRV_ERR(dev_priv->device,
				"copy_from_user failed\n");
			goto free_ibdesc;
		}
	} else {
		KGSL_DRV_INFO(dev_priv->device,
			"Using single IB submission mode for ib submission\n");
		/* If user space driver is still using the old mode of
		 * submitting single ib then we need to support that as well */
		ibdesc = kzalloc(sizeof(struct kgsl_ibdesc), GFP_KERNEL);
		if (!ibdesc) {
			KGSL_MEM_ERR(dev_priv->device,
				"kzalloc(%d) failed\n",
				sizeof(struct kgsl_ibdesc));
			result = -ENOMEM;
			goto done;
		}
		ibdesc[0].gpuaddr = param->ibdesc_addr;
		ibdesc[0].sizedwords = param->numibs;
		param->numibs = 1;
	}

	result = dev_priv->device->ftbl->issueibcmds(dev_priv,
					context,
					ibdesc,
					param->numibs,
					&param->timestamp,
					param->flags);

	trace_kgsl_issueibcmds(dev_priv->device, param, ibdesc, result);

free_ibdesc:
	kfree(ibdesc);
done:

	return result;
}

static long _cmdstream_readtimestamp(struct kgsl_device_private *dev_priv,
		struct kgsl_context *context, unsigned int type,
		unsigned int *timestamp)
{
	*timestamp = kgsl_readtimestamp(dev_priv->device, context, type);

	trace_kgsl_readtimestamp(dev_priv->device,
			context ? context->id : KGSL_MEMSTORE_GLOBAL,
			type, *timestamp);

	return 0;
}

static long kgsl_ioctl_cmdstream_readtimestamp(struct kgsl_device_private
						*dev_priv, unsigned int cmd,
						void *data)
{
	struct kgsl_cmdstream_readtimestamp *param = data;

	return _cmdstream_readtimestamp(dev_priv, NULL,
			param->type, &param->timestamp);
}

static long kgsl_ioctl_cmdstream_readtimestamp_ctxtid(struct kgsl_device_private
						*dev_priv, unsigned int cmd,
						void *data)
{
	struct kgsl_cmdstream_readtimestamp_ctxtid *param = data;
	struct kgsl_context *context;

	context = kgsl_find_context(dev_priv, param->context_id);
	if (context == NULL) {
		KGSL_DRV_ERR(dev_priv->device, "invalid context_id %d\n",
			param->context_id);
		return -EINVAL;
	}

	return _cmdstream_readtimestamp(dev_priv, context,
			param->type, &param->timestamp);
}

static void kgsl_freemem_event_cb(struct kgsl_device *device,
	void *priv, u32 id, u32 timestamp)
{
	struct kgsl_mem_entry *entry = priv;
	spin_lock(&entry->priv->mem_lock);
	rb_erase(&entry->node, &entry->priv->mem_rb);
	spin_unlock(&entry->priv->mem_lock);
	trace_kgsl_mem_timestamp_free(device, entry, id, timestamp, 0);
	kgsl_mem_entry_detach_process(entry);
}

static long _cmdstream_freememontimestamp(struct kgsl_device_private *dev_priv,
		unsigned int gpuaddr, struct kgsl_context *context,
		unsigned int timestamp, unsigned int type)
{
	int result = 0;
	struct kgsl_mem_entry *entry = NULL;
	struct kgsl_device *device = dev_priv->device;
	unsigned int context_id = context ? context->id : KGSL_MEMSTORE_GLOBAL;

	spin_lock(&dev_priv->process_priv->mem_lock);
	entry = kgsl_sharedmem_find(dev_priv->process_priv, gpuaddr);
	spin_unlock(&dev_priv->process_priv->mem_lock);

	if (!entry) {
		KGSL_DRV_ERR(dev_priv->device,
				"invalid gpuaddr %08x\n", gpuaddr);
		result = -EINVAL;
		goto done;
	}
	trace_kgsl_mem_timestamp_queue(device, entry, context_id,
				kgsl_readtimestamp(device, context,
					KGSL_TIMESTAMP_RETIRED),
				timestamp);
	result = kgsl_add_event(dev_priv->device, context_id, timestamp,
				kgsl_freemem_event_cb, entry, dev_priv);
done:
	return result;
}

static long kgsl_ioctl_cmdstream_freememontimestamp(struct kgsl_device_private
						    *dev_priv, unsigned int cmd,
						    void *data)
{
	struct kgsl_cmdstream_freememontimestamp *param = data;

	return _cmdstream_freememontimestamp(dev_priv, param->gpuaddr,
			NULL, param->timestamp, param->type);
}

static long kgsl_ioctl_cmdstream_freememontimestamp_ctxtid(
						struct kgsl_device_private
						*dev_priv, unsigned int cmd,
						void *data)
{
	struct kgsl_cmdstream_freememontimestamp_ctxtid *param = data;
	struct kgsl_context *context;

	context = kgsl_find_context(dev_priv, param->context_id);
	if (context == NULL) {
		KGSL_DRV_ERR(dev_priv->device,
			"invalid drawctxt context_id %d\n", param->context_id);
		return -EINVAL;
	}

	return _cmdstream_freememontimestamp(dev_priv, param->gpuaddr,
			context, param->timestamp, param->type);
}

static long kgsl_ioctl_drawctxt_create(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_drawctxt_create *param = data;
	struct kgsl_context *context = NULL;

	context = kgsl_create_context(dev_priv);

	if (context == NULL) {
		result = -ENOMEM;
		goto done;
	}

	if (dev_priv->device->ftbl->drawctxt_create) {
		result = dev_priv->device->ftbl->drawctxt_create(
			dev_priv->device, dev_priv->process_priv->pagetable,
			context, param->flags);
		if (result)
			goto done;
	}
	trace_kgsl_context_create(dev_priv->device, context, param->flags);
	param->drawctxt_id = context->id;
done:
	if (result && context)
		kgsl_context_detach(context);

	return result;
}

static long kgsl_ioctl_drawctxt_destroy(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_drawctxt_destroy *param = data;
	struct kgsl_context *context;

	context = kgsl_find_context(dev_priv, param->drawctxt_id);

	if (context == NULL) {
		result = -EINVAL;
		goto done;
	}

	kgsl_context_detach(context);
done:
	return result;
}

static long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_sharedmem_free *param = data;
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_mem_entry *entry = NULL;

	spin_lock(&private->mem_lock);
	entry = kgsl_sharedmem_find(private, param->gpuaddr);
	if (entry)
		rb_erase(&entry->node, &private->mem_rb);

	spin_unlock(&private->mem_lock);

	if (entry) {
		trace_kgsl_mem_free(entry);
		kgsl_mem_entry_detach_process(entry);
	} else {
		KGSL_CORE_ERR("invalid gpuaddr %08x\n", param->gpuaddr);
		result = -EINVAL;
	}

	return result;
}

static struct vm_area_struct *kgsl_get_vma_from_start_addr(unsigned int addr)
{
	struct vm_area_struct *vma;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	up_read(&current->mm->mmap_sem);
	if (!vma)
		KGSL_CORE_ERR("find_vma(%x) failed\n", addr);

	return vma;
}

static long
kgsl_ioctl_sharedmem_from_vmalloc(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data)
{
	int result = 0, len = 0;
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_sharedmem_from_vmalloc *param = data;
	struct kgsl_mem_entry *entry = NULL;
	struct vm_area_struct *vma;

	KGSL_DEV_ERR_ONCE(dev_priv->device, "IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC"
			" is deprecated\n");
	if (!kgsl_mmu_enabled())
		return -ENODEV;

	if (!param->hostptr) {
		KGSL_CORE_ERR("invalid hostptr %x\n", param->hostptr);
		result = -EINVAL;
		goto error;
	}

	vma = kgsl_get_vma_from_start_addr(param->hostptr);
	if (!vma) {
		result = -EINVAL;
		goto error;
	}

	/*
	 * If the user specified a length, use it, otherwise try to
	 * infer the length of the vma region
	 */
	if (param->gpuaddr != 0) {
		len = param->gpuaddr;
	} else {
		/*
		 * For this to work, we have to assume the VMA region is only
		 * for this single allocation. If it isn't, then bail out
		 */
		if (vma->vm_pgoff || (param->hostptr != vma->vm_start)) {
			KGSL_CORE_ERR("VMA region does not match hostaddr\n");
			result = -EINVAL;
			goto error;
		}

		len = vma->vm_end - vma->vm_start;
	}

	/* Make sure it fits */
	if (len == 0 || param->hostptr + len > vma->vm_end) {
		KGSL_CORE_ERR("Invalid memory allocation length %d\n", len);
		result = -EINVAL;
		goto error;
	}

	entry = kgsl_mem_entry_create();
	if (entry == NULL) {
		result = -ENOMEM;
		goto error;
	}

	result = kgsl_sharedmem_page_alloc_user(&entry->memdesc,
						private->pagetable, len,
						param->flags);
	if (result != 0)
		goto error_free_entry;

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	result = kgsl_sharedmem_map_vma(vma, &entry->memdesc);
	if (result) {
		KGSL_CORE_ERR("kgsl_sharedmem_map_vma failed: %d\n", result);
		goto error_free_alloc;
	}

	param->gpuaddr = entry->memdesc.gpuaddr;

	entry->memtype = KGSL_MEM_ENTRY_KERNEL;

	kgsl_mem_entry_attach_process(entry, private);

	trace_kgsl_mem_alloc(entry);
	/* Process specific statistics */
	kgsl_process_add_stats(private, entry->memtype, len);

	kgsl_check_idle(dev_priv->device);
	return 0;

error_free_alloc:
	kgsl_sharedmem_free(&entry->memdesc);

error_free_entry:
	kfree(entry);

error:
	kgsl_check_idle(dev_priv->device);
	return result;
}

static inline int _check_region(unsigned long start, unsigned long size,
				uint64_t len)
{
	uint64_t end = ((uint64_t) start) + size;
	return (end > len);
}

static int kgsl_get_phys_file(int fd, unsigned long *start, unsigned long *len,
			      unsigned long *vstart, struct file **filep)
{
	struct file *fbfile;
	int ret = 0;
	dev_t rdev;
	struct fb_info *info;

	*filep = NULL;
#ifdef CONFIG_ANDROID_PMEM
	if (!get_pmem_file(fd, start, vstart, len, filep))
		return 0;
#endif

	fbfile = fget(fd);
	if (fbfile == NULL) {
		KGSL_CORE_ERR("fget_light failed\n");
		return -1;
	}

	rdev = fbfile->f_dentry->d_inode->i_rdev;
	info = MAJOR(rdev) == FB_MAJOR ? registered_fb[MINOR(rdev)] : NULL;
	if (info) {
		*start = info->fix.smem_start;
		*len = info->fix.smem_len;
		*vstart = (unsigned long)__va(info->fix.smem_start);
		ret = 0;
	} else {
		KGSL_CORE_ERR("framebuffer minor %d not found\n",
			      MINOR(rdev));
		ret = -1;
	}

	fput(fbfile);

	return ret;
}

1508static int kgsl_setup_phys_file(struct kgsl_mem_entry *entry,
1509 struct kgsl_pagetable *pagetable,
1510 unsigned int fd, unsigned int offset,
1511 size_t size)
1512{
1513 int ret;
1514 unsigned long phys, virt, len;
1515 struct file *filep;
1516
1517 ret = kgsl_get_phys_file(fd, &phys, &len, &virt, &filep);
1518 if (ret)
1519 return ret;
1520
Wei Zou4061c0b2011-07-08 10:24:22 -07001521 if (phys == 0) {
1522 ret = -EINVAL;
1523 goto err;
1524 }
1525
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001526 if (offset >= len) {
1527 ret = -EINVAL;
1528 goto err;
1529 }
1530
1531 if (size == 0)
1532 size = len;
1533
1534 /* Adjust the size of the region to account for the offset */
1535 size += offset & ~PAGE_MASK;
1536
1537 size = ALIGN(size, PAGE_SIZE);
1538
1539 if (_check_region(offset & PAGE_MASK, size, len)) {
1540 KGSL_CORE_ERR("Offset (%ld) + size (%d) is larger "
1541 "than pmem region length %ld\n",
1542 offset & PAGE_MASK, size, len);
1543 ret = -EINVAL;
1544 goto err;
1545
1546 }
1547
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001548 entry->priv_data = filep;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001549
1550 entry->memdesc.pagetable = pagetable;
1551 entry->memdesc.size = size;
1552 entry->memdesc.physaddr = phys + (offset & PAGE_MASK);
1553 entry->memdesc.hostptr = (void *) (virt + (offset & PAGE_MASK));
Jordan Croused17e9aa2011-10-12 16:57:48 -06001554
1555 ret = memdesc_sg_phys(&entry->memdesc,
1556 phys + (offset & PAGE_MASK), size);
1557 if (ret)
1558 goto err;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001559
1560 return 0;
1561err:
Jordan Crousefd978432011-09-02 14:34:32 -06001562#ifdef CONFIG_ANDROID_PMEM
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001563 put_pmem_file(filep);
Jordan Crousefd978432011-09-02 14:34:32 -06001564#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001565 return ret;
1566}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001567
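/*
 * memdesc_sg_virt - build a scatterlist for a range of user virtual memory
 * by walking the calling process' page tables (under page_table_lock) and
 * recording one page-sized segment per resident page.  Returns -EINVAL if
 * any page in the range is not mapped.
 */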
Jordan Croused17e9aa2011-10-12 16:57:48 -06001568static int memdesc_sg_virt(struct kgsl_memdesc *memdesc,
1569 void *addr, int size)
1570{
1571 int i;
1572 int sglen = PAGE_ALIGN(size) / PAGE_SIZE;
1573 unsigned long paddr = (unsigned long) addr;
1574
Jordan Crousea652a072012-04-06 16:26:33 -06001575 memdesc->sg = kgsl_sg_alloc(sglen);
1576
Jordan Croused17e9aa2011-10-12 16:57:48 -06001577 if (memdesc->sg == NULL)
1578 return -ENOMEM;
1579
1580 memdesc->sglen = sglen;
1581 sg_init_table(memdesc->sg, sglen);
1582
1583 spin_lock(&current->mm->page_table_lock);
1584
1585 for (i = 0; i < sglen; i++, paddr += PAGE_SIZE) {
1586 struct page *page;
1587 pmd_t *ppmd;
1588 pte_t *ppte;
1589 pgd_t *ppgd = pgd_offset(current->mm, paddr);
1590
1591 if (pgd_none(*ppgd) || pgd_bad(*ppgd))
1592 goto err;
1593
Steve Mucklef132c6c2012-06-06 18:30:57 -07001594 ppmd = pmd_offset(pud_offset(ppgd, paddr), paddr);
Jordan Croused17e9aa2011-10-12 16:57:48 -06001595 if (pmd_none(*ppmd) || pmd_bad(*ppmd))
1596 goto err;
1597
1598 ppte = pte_offset_map(ppmd, paddr);
1599 if (ppte == NULL)
1600 goto err;
1601
1602 page = pfn_to_page(pte_pfn(*ppte));
1603 if (!page)
1604 goto err;
1605
1606 sg_set_page(&memdesc->sg[i], page, PAGE_SIZE, 0);
1607 pte_unmap(ppte);
1608 }
1609
1610 spin_unlock(&current->mm->page_table_lock);
1611
1612 return 0;
1613
1614err:
1615 spin_unlock(&current->mm->page_table_lock);
Jordan Crousea652a072012-04-06 16:26:33 -06001616 kgsl_sg_free(memdesc->sg, sglen);
Jordan Croused17e9aa2011-10-12 16:57:48 -06001617 memdesc->sg = NULL;
1618
1619 return -EINVAL;
1620}
1621
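/*
 * Illustrative sketch (not part of the driver): once memdesc_sg_virt() has
 * built memdesc->sg, the table can be walked with the standard scatterlist
 * iterator, e.g. to total up the mapped length much as kgsl_setup_ion()
 * below sums s->length over its list:
 *
 *	struct scatterlist *s;
 *	size_t total = 0;
 *	int i;
 *
 *	for_each_sg(memdesc->sg, s, memdesc->sglen, i)
 *		total += s->length;
 */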
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001622static int kgsl_setup_hostptr(struct kgsl_mem_entry *entry,
1623 struct kgsl_pagetable *pagetable,
1624 void *hostptr, unsigned int offset,
1625 size_t size)
1626{
1627 struct vm_area_struct *vma;
1628 unsigned int len;
1629
1630 down_read(&current->mm->mmap_sem);
1631 vma = find_vma(current->mm, (unsigned int) hostptr);
1632 up_read(&current->mm->mmap_sem);
1633
1634 if (!vma) {
1635 KGSL_CORE_ERR("find_vma(%p) failed\n", hostptr);
1636 return -EINVAL;
1637 }
1638
1639 /* We don't necessarily start at vma->vm_start */
1640 len = vma->vm_end - (unsigned long) hostptr;
1641
1642 if (offset >= len)
1643 return -EINVAL;
1644
1645 if (!KGSL_IS_PAGE_ALIGNED((unsigned long) hostptr) ||
1646 !KGSL_IS_PAGE_ALIGNED(len)) {
1647 KGSL_CORE_ERR("user address len(%u) "
1648 "and start(%p) must be page "
1649 "aligned\n", len, hostptr);
1650 return -EINVAL;
1651 }
1652
1653 if (size == 0)
1654 size = len;
1655
1656 /* Adjust the size of the region to account for the offset */
1657 size += offset & ~PAGE_MASK;
1658
1659 size = ALIGN(size, PAGE_SIZE);
1660
1661 if (_check_region(offset & PAGE_MASK, size, len)) {
1662 KGSL_CORE_ERR("Offset (%ld) + size (%d) is larger "
1663 "than region length %d\n",
1664 offset & PAGE_MASK, size, len);
1665 return -EINVAL;
1666 }
1667
1668 entry->memdesc.pagetable = pagetable;
1669 entry->memdesc.size = size;
1670 entry->memdesc.hostptr = hostptr + (offset & PAGE_MASK);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001671
Jordan Croused17e9aa2011-10-12 16:57:48 -06001672 return memdesc_sg_virt(&entry->memdesc,
1673 hostptr + (offset & PAGE_MASK), size);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001674}
1675
1676#ifdef CONFIG_ASHMEM
1677static int kgsl_setup_ashmem(struct kgsl_mem_entry *entry,
1678 struct kgsl_pagetable *pagetable,
1679 int fd, void *hostptr, size_t size)
1680{
1681 int ret;
1682 struct vm_area_struct *vma;
1683 struct file *filep, *vmfile;
1684 unsigned long len;
Jordan Crouse2c542b62011-07-26 08:30:20 -06001685 unsigned int hostaddr = (unsigned int) hostptr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001686
Jordan Crouse2c542b62011-07-26 08:30:20 -06001687 vma = kgsl_get_vma_from_start_addr(hostaddr);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001688 if (vma == NULL)
1689 return -EINVAL;
1690
Jordan Crouse2c542b62011-07-26 08:30:20 -06001691 if (vma->vm_pgoff || vma->vm_start != hostaddr) {
1692 KGSL_CORE_ERR("Invalid vma region\n");
1693 return -EINVAL;
1694 }
1695
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001696 len = vma->vm_end - vma->vm_start;
1697
1698 if (size == 0)
1699 size = len;
1700
1701 if (size != len) {
1702 KGSL_CORE_ERR("Invalid size %d for vma region %p\n",
1703 size, hostptr);
1704 return -EINVAL;
1705 }
1706
1707 ret = get_ashmem_file(fd, &filep, &vmfile, &len);
1708
1709 if (ret) {
1710 KGSL_CORE_ERR("get_ashmem_file failed\n");
1711 return ret;
1712 }
1713
1714 if (vmfile != vma->vm_file) {
1715 KGSL_CORE_ERR("ashmem shmem file does not match vma\n");
1716 ret = -EINVAL;
1717 goto err;
1718 }
1719
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001720 entry->priv_data = filep;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001721 entry->memdesc.pagetable = pagetable;
1722 entry->memdesc.size = ALIGN(size, PAGE_SIZE);
1723 entry->memdesc.hostptr = hostptr;
Jordan Croused17e9aa2011-10-12 16:57:48 -06001724
1725 ret = memdesc_sg_virt(&entry->memdesc, hostptr, size);
1726 if (ret)
1727 goto err;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001728
1729 return 0;
1730
1731err:
1732 put_ashmem_file(filep);
1733 return ret;
1734}
1735#else
1736static int kgsl_setup_ashmem(struct kgsl_mem_entry *entry,
1737 struct kgsl_pagetable *pagetable,
1738 int fd, void *hostptr, size_t size)
1739{
1740 return -EINVAL;
1741}
1742#endif
1743
Jordan Crouse8eab35a2011-10-12 16:57:48 -06001744static int kgsl_setup_ion(struct kgsl_mem_entry *entry,
1745 struct kgsl_pagetable *pagetable, int fd)
1746{
1747 struct ion_handle *handle;
1748 struct scatterlist *s;
1749 unsigned long flags;
1750
Harsh Vardhan Dwivedif48af7f2012-04-13 12:50:44 -06001751 if (IS_ERR_OR_NULL(kgsl_ion_client))
1752 return -ENODEV;
Jordan Crouse8eab35a2011-10-12 16:57:48 -06001753
1754 handle = ion_import_fd(kgsl_ion_client, fd);
1755 if (IS_ERR_OR_NULL(handle))
1756 return PTR_ERR(handle);
1757
1758 entry->memtype = KGSL_MEM_ENTRY_ION;
1759 entry->priv_data = handle;
1760 entry->memdesc.pagetable = pagetable;
1761 entry->memdesc.size = 0;
1762
1763 if (ion_handle_get_flags(kgsl_ion_client, handle, &flags))
1764 goto err;
1765
1766 entry->memdesc.sg = ion_map_dma(kgsl_ion_client, handle, flags);
1767
1768 if (IS_ERR_OR_NULL(entry->memdesc.sg))
1769 goto err;
1770
1771 /* Calculate the size of the memdesc from the sglist */
1772
1773 entry->memdesc.sglen = 0;
1774
1775 for (s = entry->memdesc.sg; s != NULL; s = sg_next(s)) {
1776 entry->memdesc.size += s->length;
1777 entry->memdesc.sglen++;
1778 }
1779
1780 return 0;
1781err:
1782 ion_free(kgsl_ion_client, handle);
1783 return -ENOMEM;
1784}
1785
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001786static long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
1787 unsigned int cmd, void *data)
1788{
1789 int result = -EINVAL;
1790 struct kgsl_map_user_mem *param = data;
1791 struct kgsl_mem_entry *entry = NULL;
1792 struct kgsl_process_private *private = dev_priv->process_priv;
Jason848741a2011-07-12 10:24:25 -07001793 enum kgsl_user_mem_type memtype;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001794
1795 entry = kgsl_mem_entry_create();
1796
1797 if (entry == NULL)
1798 return -ENOMEM;
1799
Jason848741a2011-07-12 10:24:25 -07001800 if (_IOC_SIZE(cmd) == sizeof(struct kgsl_sharedmem_from_pmem))
1801 memtype = KGSL_USER_MEM_TYPE_PMEM;
1802 else
1803 memtype = param->memtype;
1804
1805 switch (memtype) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001806 case KGSL_USER_MEM_TYPE_PMEM:
1807 if (param->fd == 0 || param->len == 0)
1808 break;
1809
1810 result = kgsl_setup_phys_file(entry, private->pagetable,
1811 param->fd, param->offset,
1812 param->len);
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001813 entry->memtype = KGSL_MEM_ENTRY_PMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001814 break;
1815
1816 case KGSL_USER_MEM_TYPE_ADDR:
Harsh Vardhan Dwivedia9eb7cb2012-03-26 15:21:38 -06001817 KGSL_DEV_ERR_ONCE(dev_priv->device, "User mem type "
1818 "KGSL_USER_MEM_TYPE_ADDR is deprecated\n");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001819 if (!kgsl_mmu_enabled()) {
1820 KGSL_DRV_ERR(dev_priv->device,
1821 "Cannot map paged memory with the "
1822 "MMU disabled\n");
1823 break;
1824 }
1825
1826 if (param->hostptr == 0)
1827 break;
1828
1829 result = kgsl_setup_hostptr(entry, private->pagetable,
1830 (void *) param->hostptr,
1831 param->offset, param->len);
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001832 entry->memtype = KGSL_MEM_ENTRY_USER;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001833 break;
1834
1835 case KGSL_USER_MEM_TYPE_ASHMEM:
1836 if (!kgsl_mmu_enabled()) {
1837 KGSL_DRV_ERR(dev_priv->device,
1838 "Cannot map paged memory with the "
1839 "MMU disabled\n");
1840 break;
1841 }
1842
1843 if (param->hostptr == 0)
1844 break;
1845
1846 result = kgsl_setup_ashmem(entry, private->pagetable,
1847 param->fd, (void *) param->hostptr,
1848 param->len);
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001849
1850 entry->memtype = KGSL_MEM_ENTRY_ASHMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001851 break;
Jordan Crouse8eab35a2011-10-12 16:57:48 -06001852 case KGSL_USER_MEM_TYPE_ION:
1853 result = kgsl_setup_ion(entry, private->pagetable,
1854 param->fd);
1855 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001856 default:
Jason848741a2011-07-12 10:24:25 -07001857 KGSL_CORE_ERR("Invalid memory type: %x\n", memtype);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001858 break;
1859 }
1860
1861 if (result)
1862 goto error;
1863
1864 result = kgsl_mmu_map(private->pagetable,
1865 &entry->memdesc,
1866 GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
1867
1868 if (result)
1869 goto error_put_file_ptr;
1870
1871 /* Adjust the returned value for a non 4k aligned offset */
1872 param->gpuaddr = entry->memdesc.gpuaddr + (param->offset & ~PAGE_MASK);
1873
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001874 KGSL_STATS_ADD(param->len, kgsl_driver.stats.mapped,
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001875 kgsl_driver.stats.mapped_max);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001876
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001877 kgsl_process_add_stats(private, entry->memtype, param->len);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001878
1879 kgsl_mem_entry_attach_process(entry, private);
Jeremy Gebbena5859272012-03-01 12:46:28 -07001880 trace_kgsl_mem_map(entry, param->fd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001881
1882 kgsl_check_idle(dev_priv->device);
1883 return result;
1884
Jeremy Gebben53d4dd02012-05-07 15:42:00 -06001885error_put_file_ptr:
1886 switch (entry->memtype) {
1887 case KGSL_MEM_ENTRY_PMEM:
1888 case KGSL_MEM_ENTRY_ASHMEM:
1889 if (entry->priv_data)
1890 fput(entry->priv_data);
1891 break;
1892 case KGSL_MEM_ENTRY_ION:
1893 ion_unmap_dma(kgsl_ion_client, entry->priv_data);
1894 ion_free(kgsl_ion_client, entry->priv_data);
1895 break;
1896 default:
1897 break;
1898 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001899error:
1900 kfree(entry);
1901 kgsl_check_idle(dev_priv->device);
1902 return result;
1903}
1904
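/*
 * Illustrative userspace sketch (an assumption, not part of this file): a
 * client typically imports an ION buffer for GPU access by passing its
 * shared fd through IOCTL_KGSL_MAP_USER_MEM; "kgsl_fd" and "ion_share_fd"
 * are hypothetical names:
 *
 *	struct kgsl_map_user_mem map = {
 *		.fd = ion_share_fd,
 *		.memtype = KGSL_USER_MEM_TYPE_ION,
 *	};
 *
 *	ioctl(kgsl_fd, IOCTL_KGSL_MAP_USER_MEM, &map);
 *	(on success, map.gpuaddr holds the GPU virtual address)
 */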
1905/* This function flushes a graphics memory allocation from the CPU cache
1906 * when caching is enabled with the MMU */
1907static long
1908kgsl_ioctl_sharedmem_flush_cache(struct kgsl_device_private *dev_priv,
1909 unsigned int cmd, void *data)
1910{
1911 int result = 0;
1912 struct kgsl_mem_entry *entry;
1913 struct kgsl_sharedmem_free *param = data;
1914 struct kgsl_process_private *private = dev_priv->process_priv;
1915
1916 spin_lock(&private->mem_lock);
1917 entry = kgsl_sharedmem_find(private, param->gpuaddr);
1918 if (!entry) {
1919 KGSL_CORE_ERR("invalid gpuaddr %08x\n", param->gpuaddr);
1920 result = -EINVAL;
Jeremy Gebben690f9d12011-08-08 16:33:49 -06001921 goto done;
1922 }
Jeremy Gebben690f9d12011-08-08 16:33:49 -06001923 if (!entry->memdesc.hostptr) {
1924 KGSL_CORE_ERR("invalid hostptr with gpuaddr %08x\n",
1925 param->gpuaddr);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001926 goto done;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001927 }
Jeremy Gebben690f9d12011-08-08 16:33:49 -06001928
1929 kgsl_cache_range_op(&entry->memdesc, KGSL_CACHE_OP_CLEAN);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001930done:
Jeremy Gebben690f9d12011-08-08 16:33:49 -06001931 spin_unlock(&private->mem_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001932 return result;
1933}
1934
1935static long
1936kgsl_ioctl_gpumem_alloc(struct kgsl_device_private *dev_priv,
1937 unsigned int cmd, void *data)
1938{
1939 struct kgsl_process_private *private = dev_priv->process_priv;
1940 struct kgsl_gpumem_alloc *param = data;
1941 struct kgsl_mem_entry *entry;
1942 int result;
1943
1944 entry = kgsl_mem_entry_create();
1945 if (entry == NULL)
1946 return -ENOMEM;
1947
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001948 result = kgsl_allocate_user(&entry->memdesc, private->pagetable,
1949 param->size, param->flags);
1950
1951 if (result == 0) {
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001952 entry->memtype = KGSL_MEM_ENTRY_KERNEL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001953 kgsl_mem_entry_attach_process(entry, private);
1954 param->gpuaddr = entry->memdesc.gpuaddr;
1955
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001956 kgsl_process_add_stats(private, entry->memtype, param->size);
Jeremy Gebbena5859272012-03-01 12:46:28 -07001957 trace_kgsl_mem_alloc(entry);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001958 } else
1959 kfree(entry);
1960
1961 kgsl_check_idle(dev_priv->device);
1962 return result;
1963}
Jeremy Gebbena7423e42011-04-18 15:11:21 -06001964static long kgsl_ioctl_cff_syncmem(struct kgsl_device_private *dev_priv,
1965 unsigned int cmd, void *data)
1966{
1967 int result = 0;
1968 struct kgsl_cff_syncmem *param = data;
1969 struct kgsl_process_private *private = dev_priv->process_priv;
1970 struct kgsl_mem_entry *entry = NULL;
1971
1972 spin_lock(&private->mem_lock);
1973 entry = kgsl_sharedmem_find_region(private, param->gpuaddr, param->len);
1974 if (entry)
1975 kgsl_cffdump_syncmem(dev_priv, &entry->memdesc, param->gpuaddr,
1976 param->len, true);
1977 else
1978 result = -EINVAL;
1979 spin_unlock(&private->mem_lock);
1980 return result;
1981}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001982
Sushmita Susheelendra41f8fa32011-05-11 17:15:58 -06001983static long kgsl_ioctl_cff_user_event(struct kgsl_device_private *dev_priv,
1984 unsigned int cmd, void *data)
1985{
1986 int result = 0;
1987 struct kgsl_cff_user_event *param = data;
1988
1989 kgsl_cffdump_user_event(param->cff_opcode, param->op1, param->op2,
1990 param->op3, param->op4, param->op5);
1991
1992 return result;
1993}
1994
Jordan Croused4bc9d22011-11-17 13:39:21 -07001995#ifdef CONFIG_GENLOCK
1996struct kgsl_genlock_event_priv {
1997 struct genlock_handle *handle;
1998 struct genlock *lock;
1999};
2000
2001/**
2002 * kgsl_genlock_event_cb - Event callback for a genlock timestamp event
2003 * @device - The KGSL device that expired the timestamp
2004 * @priv - private data for the event
Carter Cooper7e7f02e2012-02-15 09:36:31 -07002005 * @context_id - the context id that goes with the timestamp
Jordan Croused4bc9d22011-11-17 13:39:21 -07002006 * @timestamp - the timestamp that triggered the event
2007 *
2008 * Release a genlock lock following the expiration of a timestamp
2009 */
2010
2011static void kgsl_genlock_event_cb(struct kgsl_device *device,
Carter Cooper7e7f02e2012-02-15 09:36:31 -07002012 void *priv, u32 context_id, u32 timestamp)
Jordan Croused4bc9d22011-11-17 13:39:21 -07002013{
2014 struct kgsl_genlock_event_priv *ev = priv;
2015 int ret;
2016
2017 ret = genlock_lock(ev->handle, GENLOCK_UNLOCK, 0, 0);
2018 if (ret)
2019 KGSL_CORE_ERR("Error while unlocking genlock: %d\n", ret);
2020
2021 genlock_put_handle(ev->handle);
2022
2023 kfree(ev);
2024}
2025
2026/**
2027 * kgsl_add_genlock_event - Create a new genlock event
2028 * @device - KGSL device to create the event on
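 * @context_id - the context id that goes with the timestamp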
2029 * @timestamp - Timestamp to trigger the event
2030 * @data - User space buffer containing struct kgsl_genlock_event_priv
2031 * @len - length of the userspace buffer
Jeremy Gebbenfd87f9a2012-02-10 07:06:09 -07002032 * @owner - driver instance that owns this event
Jordan Croused4bc9d22011-11-17 13:39:21 -07002033 * @returns 0 on success or error code on error
2034 *
2035 * Attach to a genlock handle and register an event to release the
2036 * genlock lock when the timestamp expires
2037 */
2038
2039static int kgsl_add_genlock_event(struct kgsl_device *device,
Carter Cooper7e7f02e2012-02-15 09:36:31 -07002040 u32 context_id, u32 timestamp, void __user *data, int len,
Jeremy Gebbenfd87f9a2012-02-10 07:06:09 -07002041 struct kgsl_device_private *owner)
Jordan Croused4bc9d22011-11-17 13:39:21 -07002042{
2043 struct kgsl_genlock_event_priv *event;
2044 struct kgsl_timestamp_event_genlock priv;
2045 int ret;
2046
2047 if (len != sizeof(priv))
2048 return -EINVAL;
2049
2050 if (copy_from_user(&priv, data, sizeof(priv)))
2051 return -EFAULT;
2052
2053 event = kzalloc(sizeof(*event), GFP_KERNEL);
2054
2055 if (event == NULL)
2056 return -ENOMEM;
2057
2058 event->handle = genlock_get_handle_fd(priv.handle);
2059
2060 if (IS_ERR(event->handle)) {
2061 int ret = PTR_ERR(event->handle);
2062 kfree(event);
2063 return ret;
2064 }
2065
Carter Cooper7e7f02e2012-02-15 09:36:31 -07002066 ret = kgsl_add_event(device, context_id, timestamp,
2067 kgsl_genlock_event_cb, event, owner);
Jordan Croused4bc9d22011-11-17 13:39:21 -07002068 if (ret)
2069 kfree(event);
2070
2071 return ret;
2072}
2073#else
2074static long kgsl_add_genlock_event(struct kgsl_device *device,
Carter Cooper7e7f02e2012-02-15 09:36:31 -07002075 u32 context_id, u32 timestamp, void __user *data, int len,
Jeremy Gebbenfd87f9a2012-02-10 07:06:09 -07002076 struct kgsl_device_private *owner)
Jordan Croused4bc9d22011-11-17 13:39:21 -07002077{
2078 return -EINVAL;
2079}
2080#endif
2081
2082/**
2083 * kgsl_ioctl_timestamp_event - Register a new timestamp event from userspace
2084 * @dev_priv - pointer to the private device structure
2085 * @cmd - the ioctl cmd passed from kgsl_ioctl
2086 * @data - the user data buffer from kgsl_ioctl
2087 * @returns 0 on success or error code on failure
2088 */
2089
2090static long kgsl_ioctl_timestamp_event(struct kgsl_device_private *dev_priv,
2091 unsigned int cmd, void *data)
2092{
2093 struct kgsl_timestamp_event *param = data;
2094 int ret;
2095
2096 switch (param->type) {
2097 case KGSL_TIMESTAMP_EVENT_GENLOCK:
2098 ret = kgsl_add_genlock_event(dev_priv->device,
Carter Cooper7e7f02e2012-02-15 09:36:31 -07002099 param->context_id, param->timestamp, param->priv,
2100 param->len, dev_priv);
Jordan Croused4bc9d22011-11-17 13:39:21 -07002101 break;
2102 default:
2103 ret = -EINVAL;
2104 }
2105
2106 return ret;
2107}
2108
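/*
 * Illustrative userspace sketch (an assumption, not part of this file): a
 * client arms a genlock release against a GPU timestamp roughly as follows,
 * where "kgsl_fd" is an open KGSL device fd and "lock_fd" is a genlock
 * handle fd (both names hypothetical):
 *
 *	struct kgsl_timestamp_event_genlock gl = { .handle = lock_fd };
 *	struct kgsl_timestamp_event ev = {
 *		.type = KGSL_TIMESTAMP_EVENT_GENLOCK,
 *		.timestamp = ts,
 *		.context_id = ctx_id,
 *		.priv = &gl,
 *		.len = sizeof(gl),
 *	};
 *
 *	ioctl(kgsl_fd, IOCTL_KGSL_TIMESTAMP_EVENT, &ev);
 */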
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002109typedef long (*kgsl_ioctl_func_t)(struct kgsl_device_private *,
2110 unsigned int, void *);
2111
2112#define KGSL_IOCTL_FUNC(_cmd, _func, _lock) \
2113 [_IOC_NR(_cmd)] = { .cmd = _cmd, .func = _func, .lock = _lock }
2114
2115static const struct {
2116 unsigned int cmd;
2117 kgsl_ioctl_func_t func;
2118 int lock;
2119} kgsl_ioctl_funcs[] = {
2120 KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_GETPROPERTY,
2121 kgsl_ioctl_device_getproperty, 1),
2122 KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP,
2123 kgsl_ioctl_device_waittimestamp, 1),
Carter Cooper7e7f02e2012-02-15 09:36:31 -07002124 KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID,
2125 kgsl_ioctl_device_waittimestamp_ctxtid, 1),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002126 KGSL_IOCTL_FUNC(IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS,
2127 kgsl_ioctl_rb_issueibcmds, 1),
2128 KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP,
2129 kgsl_ioctl_cmdstream_readtimestamp, 1),
Carter Cooper7e7f02e2012-02-15 09:36:31 -07002130 KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID,
2131 kgsl_ioctl_cmdstream_readtimestamp_ctxtid, 1),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002132 KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP,
2133 kgsl_ioctl_cmdstream_freememontimestamp, 1),
Carter Cooper7e7f02e2012-02-15 09:36:31 -07002134 KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID,
2135 kgsl_ioctl_cmdstream_freememontimestamp_ctxtid, 1),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002136 KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_CREATE,
2137 kgsl_ioctl_drawctxt_create, 1),
2138 KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_DESTROY,
2139 kgsl_ioctl_drawctxt_destroy, 1),
2140 KGSL_IOCTL_FUNC(IOCTL_KGSL_MAP_USER_MEM,
2141 kgsl_ioctl_map_user_mem, 0),
2142 KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FROM_PMEM,
2143 kgsl_ioctl_map_user_mem, 0),
2144 KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FREE,
2145 kgsl_ioctl_sharedmem_free, 0),
2146 KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC,
2147 kgsl_ioctl_sharedmem_from_vmalloc, 0),
2148 KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE,
2149 kgsl_ioctl_sharedmem_flush_cache, 0),
2150 KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC,
2151 kgsl_ioctl_gpumem_alloc, 0),
Jeremy Gebbena7423e42011-04-18 15:11:21 -06002152 KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_SYNCMEM,
2153 kgsl_ioctl_cff_syncmem, 0),
Sushmita Susheelendra41f8fa32011-05-11 17:15:58 -06002154 KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_USER_EVENT,
2155 kgsl_ioctl_cff_user_event, 0),
Jordan Croused4bc9d22011-11-17 13:39:21 -07002156 KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMESTAMP_EVENT,
Lucille Sylvester9329cf02011-12-02 14:30:41 -07002157 kgsl_ioctl_timestamp_event, 1),
Jordan Crouseed7dd7f2012-03-29 13:16:02 -06002158 KGSL_IOCTL_FUNC(IOCTL_KGSL_SETPROPERTY,
2159 kgsl_ioctl_device_setproperty, 1),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002160};
2161
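/*
 * Note on the dispatch table above: each entry is declared with
 * KGSL_IOCTL_FUNC(cmd, handler, lock); when lock is 1, kgsl_ioctl() takes
 * the device mutex and runs the suspend/idle checks around the handler.
 * A hypothetical new entry would look like:
 *
 *	KGSL_IOCTL_FUNC(IOCTL_KGSL_EXAMPLE, kgsl_ioctl_example, 1),
 */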
2162static long kgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
2163{
2164 struct kgsl_device_private *dev_priv = filep->private_data;
2165 unsigned int nr = _IOC_NR(cmd);
2166 kgsl_ioctl_func_t func;
2167 int lock, ret;
2168 char ustack[64];
2169 void *uptr = NULL;
2170
2171 BUG_ON(dev_priv == NULL);
2172
2173 /* Workaround for a previously incorrectly defined ioctl code.
2174 This helps ensure binary compatibility */
2175
2176 if (cmd == IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD)
2177 cmd = IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP;
Jason Varbedian80ba33d2011-07-11 17:29:05 -07002178 else if (cmd == IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD)
2179 cmd = IOCTL_KGSL_CMDSTREAM_READTIMESTAMP;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002180
2181 if (cmd & (IOC_IN | IOC_OUT)) {
2182 if (_IOC_SIZE(cmd) < sizeof(ustack))
2183 uptr = ustack;
2184 else {
2185 uptr = kzalloc(_IOC_SIZE(cmd), GFP_KERNEL);
2186 if (uptr == NULL) {
2187 KGSL_MEM_ERR(dev_priv->device,
2188 "kzalloc(%d) failed\n", _IOC_SIZE(cmd));
2189 ret = -ENOMEM;
2190 goto done;
2191 }
2192 }
2193
2194 if (cmd & IOC_IN) {
2195 if (copy_from_user(uptr, (void __user *) arg,
2196 _IOC_SIZE(cmd))) {
2197 ret = -EFAULT;
2198 goto done;
2199 }
2200 } else
2201 memset(uptr, 0, _IOC_SIZE(cmd));
2202 }
2203
2204 if (nr < ARRAY_SIZE(kgsl_ioctl_funcs) &&
2205 kgsl_ioctl_funcs[nr].func != NULL) {
2206 func = kgsl_ioctl_funcs[nr].func;
2207 lock = kgsl_ioctl_funcs[nr].lock;
2208 } else {
2209 func = dev_priv->device->ftbl->ioctl;
2210 if (!func) {
2211 KGSL_DRV_INFO(dev_priv->device,
2212 "invalid ioctl code %08x\n", cmd);
Jeremy Gebbenc15b4612012-01-09 09:44:11 -07002213 ret = -ENOIOCTLCMD;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002214 goto done;
2215 }
2216 lock = 1;
2217 }
2218
2219 if (lock) {
2220 mutex_lock(&dev_priv->device->mutex);
2221 kgsl_check_suspended(dev_priv->device);
2222 }
2223
2224 ret = func(dev_priv, cmd, uptr);
2225
2226 if (lock) {
2227 kgsl_check_idle_locked(dev_priv->device);
2228 mutex_unlock(&dev_priv->device->mutex);
2229 }
2230
2231 if (ret == 0 && (cmd & IOC_OUT)) {
2232 if (copy_to_user((void __user *) arg, uptr, _IOC_SIZE(cmd)))
2233 ret = -EFAULT;
2234 }
2235
2236done:
2237 if (_IOC_SIZE(cmd) >= sizeof(ustack))
2238 kfree(uptr);
2239
2240 return ret;
2241}
2242
2243static int
2244kgsl_mmap_memstore(struct kgsl_device *device, struct vm_area_struct *vma)
2245{
2246 struct kgsl_memdesc *memdesc = &device->memstore;
2247 int result;
2248 unsigned int vma_size = vma->vm_end - vma->vm_start;
2249
2250 /* The memstore can only be mapped as read only */
2251
2252 if (vma->vm_flags & VM_WRITE)
2253 return -EPERM;
2254
2255 if (memdesc->size != vma_size) {
2256 KGSL_MEM_ERR(device, "memstore bad size: %d should be %d\n",
2257 vma_size, memdesc->size);
2258 return -EINVAL;
2259 }
2260
2261 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2262
2263 result = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
2264 vma_size, vma->vm_page_prot);
2265 if (result != 0)
2266 KGSL_MEM_ERR(device, "remap_pfn_range failed: %d\n",
2267 result);
2268
2269 return result;
2270}
2271
Jordan Crouse4283e172011-09-26 14:45:47 -06002272/*
2273 * kgsl_gpumem_vm_open is called whenever a vma region is copied or split.
2274 * Increase the refcount to make sure that the accounting stays correct
2275 */
2276
2277static void kgsl_gpumem_vm_open(struct vm_area_struct *vma)
2278{
2279 struct kgsl_mem_entry *entry = vma->vm_private_data;
2280 kgsl_mem_entry_get(entry);
2281}
2282
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002283static int
2284kgsl_gpumem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2285{
2286 struct kgsl_mem_entry *entry = vma->vm_private_data;
2287
Jordan Croused17e9aa2011-10-12 16:57:48 -06002288 if (!entry->memdesc.ops || !entry->memdesc.ops->vmfault)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002289 return VM_FAULT_SIGBUS;
2290
2291 return entry->memdesc.ops->vmfault(&entry->memdesc, vma, vmf);
2292}
2293
2294static void
2295kgsl_gpumem_vm_close(struct vm_area_struct *vma)
2296{
2297 struct kgsl_mem_entry *entry = vma->vm_private_data;
2298 kgsl_mem_entry_put(entry);
2299}
2300
2301static struct vm_operations_struct kgsl_gpumem_vm_ops = {
Jordan Crouse4283e172011-09-26 14:45:47 -06002302 .open = kgsl_gpumem_vm_open,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002303 .fault = kgsl_gpumem_vm_fault,
2304 .close = kgsl_gpumem_vm_close,
2305};
2306
2307static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
2308{
2309 unsigned long vma_offset = vma->vm_pgoff << PAGE_SHIFT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002310 struct kgsl_device_private *dev_priv = file->private_data;
2311 struct kgsl_process_private *private = dev_priv->process_priv;
Jordan Crousec9559e42012-04-05 16:55:56 -06002312 struct kgsl_mem_entry *entry = NULL;
Jordan Crouse2db0af92011-08-08 16:05:09 -06002313 struct kgsl_device *device = dev_priv->device;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002314
2315 /* Handle legacy behavior for memstore */
2316
2317 if (vma_offset == device->memstore.physaddr)
2318 return kgsl_mmap_memstore(device, vma);
2319
2320 /* Find a chunk of GPU memory */
2321
2322 spin_lock(&private->mem_lock);
Jordan Crousec9559e42012-04-05 16:55:56 -06002323 entry = kgsl_sharedmem_find(private, vma_offset);
2324
2325 if (entry)
2326 kgsl_mem_entry_get(entry);
2327
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002328 spin_unlock(&private->mem_lock);
2329
2330 if (entry == NULL)
2331 return -EINVAL;
2332
Jordan Croused17e9aa2011-10-12 16:57:48 -06002333 if (!entry->memdesc.ops ||
2334 !entry->memdesc.ops->vmflags ||
2335 !entry->memdesc.ops->vmfault)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002336 return -EINVAL;
2337
2338 vma->vm_flags |= entry->memdesc.ops->vmflags(&entry->memdesc);
2339
2340 vma->vm_private_data = entry;
2341 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
2342 vma->vm_ops = &kgsl_gpumem_vm_ops;
2343 vma->vm_file = file;
2344
2345 return 0;
2346}
2347
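/*
 * Illustrative userspace sketch (an assumption, not part of this file): GPU
 * buffers allocated with IOCTL_KGSL_GPUMEM_ALLOC are mapped into the client
 * by passing the returned gpuaddr as the mmap() offset, which kgsl_mmap()
 * above resolves through kgsl_sharedmem_find(); "kgsl_fd" is hypothetical:
 *
 *	struct kgsl_gpumem_alloc alloc = { .size = bufsize, .flags = 0 };
 *
 *	ioctl(kgsl_fd, IOCTL_KGSL_GPUMEM_ALLOC, &alloc);
 *	ptr = mmap(NULL, bufsize, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, kgsl_fd, alloc.gpuaddr);
 */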
Jordan Crouseb368e9b2012-04-27 14:01:59 -06002348static irqreturn_t kgsl_irq_handler(int irq, void *data)
2349{
2350 struct kgsl_device *device = data;
2351
2352 return device->ftbl->irq_handler(device);
2353
2354}
2355
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002356static const struct file_operations kgsl_fops = {
2357 .owner = THIS_MODULE,
2358 .release = kgsl_release,
2359 .open = kgsl_open,
2360 .mmap = kgsl_mmap,
2361 .unlocked_ioctl = kgsl_ioctl,
2362};
2363
2364struct kgsl_driver kgsl_driver = {
2365 .process_mutex = __MUTEX_INITIALIZER(kgsl_driver.process_mutex),
2366 .ptlock = __SPIN_LOCK_UNLOCKED(kgsl_driver.ptlock),
2367 .devlock = __MUTEX_INITIALIZER(kgsl_driver.devlock),
2368};
2369EXPORT_SYMBOL(kgsl_driver);
2370
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002371static void _unregister_device(struct kgsl_device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002372{
2373 int minor;
2374
2375 mutex_lock(&kgsl_driver.devlock);
2376 for (minor = 0; minor < KGSL_DEVICE_MAX; minor++) {
2377 if (device == kgsl_driver.devp[minor])
2378 break;
2379 }
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002380 if (minor != KGSL_DEVICE_MAX) {
2381 device_destroy(kgsl_driver.class,
2382 MKDEV(MAJOR(kgsl_driver.major), minor));
2383 kgsl_driver.devp[minor] = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002384 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002385 mutex_unlock(&kgsl_driver.devlock);
2386}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002387
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002388static int _register_device(struct kgsl_device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002389{
2390 int minor, ret;
2391 dev_t dev;
2392
2393 /* Find a minor for the device */
2394
2395 mutex_lock(&kgsl_driver.devlock);
2396 for (minor = 0; minor < KGSL_DEVICE_MAX; minor++) {
2397 if (kgsl_driver.devp[minor] == NULL) {
2398 kgsl_driver.devp[minor] = device;
2399 break;
2400 }
2401 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002402 mutex_unlock(&kgsl_driver.devlock);
2403
2404 if (minor == KGSL_DEVICE_MAX) {
2405 KGSL_CORE_ERR("minor devices exhausted\n");
2406 return -ENODEV;
2407 }
2408
2409 /* Create the device */
2410 dev = MKDEV(MAJOR(kgsl_driver.major), minor);
2411 device->dev = device_create(kgsl_driver.class,
2412 device->parentdev,
2413 dev, device,
2414 device->name);
2415
2416 if (IS_ERR(device->dev)) {
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002417 mutex_lock(&kgsl_driver.devlock);
2418 kgsl_driver.devp[minor] = NULL;
2419 mutex_unlock(&kgsl_driver.devlock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002420 ret = PTR_ERR(device->dev);
2421 KGSL_CORE_ERR("device_create(%s): %d\n", device->name, ret);
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002422 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002423 }
2424
2425 dev_set_drvdata(device->parentdev, device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002426 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002427}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002428
Jordan Crouseb368e9b2012-04-27 14:01:59 -06002429int kgsl_device_platform_probe(struct kgsl_device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002430{
Michael Street8bacdd02012-01-05 14:55:01 -08002431 int result;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002432 int status = -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002433 struct resource *res;
2434 struct platform_device *pdev =
2435 container_of(device->parentdev, struct platform_device, dev);
2436
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002437 status = _register_device(device);
2438 if (status)
2439 return status;
2440
2441 /* Initialize logging first, so that failures below actually print. */
2442 kgsl_device_debugfs_init(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002443
2444 status = kgsl_pwrctrl_init(device);
2445 if (status)
2446 goto error;
2447
Harsh Vardhan Dwivedif48af7f2012-04-13 12:50:44 -06002448 kgsl_ion_client = msm_ion_client_create(UINT_MAX, KGSL_NAME);
2449
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002450 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2451 device->iomemname);
2452 if (res == NULL) {
2453 KGSL_DRV_ERR(device, "platform_get_resource_byname failed\n");
2454 status = -EINVAL;
2455 goto error_pwrctrl_close;
2456 }
2457 if (res->start == 0 || resource_size(res) == 0) {
Jordan Crouse7501d452012-04-19 08:58:44 -06002458 KGSL_DRV_ERR(device, "dev %d invalid register region\n",
2459 device->id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002460 status = -EINVAL;
2461 goto error_pwrctrl_close;
2462 }
2463
Jordan Crouse7501d452012-04-19 08:58:44 -06002464 device->reg_phys = res->start;
2465 device->reg_len = resource_size(res);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002466
Jeremy Gebben4204d0f2012-03-01 16:06:21 -07002467 if (!devm_request_mem_region(device->dev, device->reg_phys,
2468 device->reg_len, device->name)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002469 KGSL_DRV_ERR(device, "request_mem_region failed\n");
2470 status = -ENODEV;
2471 goto error_pwrctrl_close;
2472 }
2473
Jeremy Gebben4204d0f2012-03-01 16:06:21 -07002474 device->reg_virt = devm_ioremap(device->dev, device->reg_phys,
2475 device->reg_len);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002476
Jordan Crouse7501d452012-04-19 08:58:44 -06002477 if (device->reg_virt == NULL) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002478 KGSL_DRV_ERR(device, "ioremap failed\n");
2479 status = -ENODEV;
Jeremy Gebben4204d0f2012-03-01 16:06:21 -07002480 goto error_pwrctrl_close;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002481 }
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002482 /* acquire interrupt */
2483 device->pwrctrl.interrupt_num =
2484 platform_get_irq_byname(pdev, device->pwrctrl.irq_name);
2485
2486 if (device->pwrctrl.interrupt_num <= 0) {
2487 KGSL_DRV_ERR(device, "platform_get_irq_byname failed: %d\n",
2488 device->pwrctrl.interrupt_num);
2489 status = -EINVAL;
Jeremy Gebben4204d0f2012-03-01 16:06:21 -07002490 goto error_pwrctrl_close;
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002491 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002492
Jeremy Gebben4204d0f2012-03-01 16:06:21 -07002493 status = devm_request_irq(device->dev, device->pwrctrl.interrupt_num,
2494 kgsl_irq_handler, IRQF_TRIGGER_HIGH,
2495 device->name, device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002496 if (status) {
2497 KGSL_DRV_ERR(device, "request_irq(%d) failed: %d\n",
2498 device->pwrctrl.interrupt_num, status);
Jeremy Gebben4204d0f2012-03-01 16:06:21 -07002499 goto error_pwrctrl_close;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002500 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002501 disable_irq(device->pwrctrl.interrupt_num);
2502
2503 KGSL_DRV_INFO(device,
Jordan Crouse7501d452012-04-19 08:58:44 -06002504 "dev_id %d regs phys 0x%08lx size 0x%08x virt %p\n",
2505 device->id, device->reg_phys, device->reg_len,
2506 device->reg_virt);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002507
Michael Street8bacdd02012-01-05 14:55:01 -08002508 result = kgsl_drm_init(pdev);
2509 if (result)
Jeremy Gebben4204d0f2012-03-01 16:06:21 -07002510 goto error_pwrctrl_close;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002511
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002512 kgsl_cffdump_open(device->id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002513
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002514 setup_timer(&device->idle_timer, kgsl_timer, (unsigned long) device);
2515 status = kgsl_create_device_workqueue(device);
2516 if (status)
Jeremy Gebben4204d0f2012-03-01 16:06:21 -07002517 goto error_pwrctrl_close;
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002518
2519 status = kgsl_mmu_init(device);
2520 if (status != 0) {
2521 KGSL_DRV_ERR(device, "kgsl_mmu_init failed %d\n", status);
2522 goto error_dest_work_q;
2523 }
2524
2525 status = kgsl_allocate_contiguous(&device->memstore,
Richard Ruigrok2ad5e9d2012-06-14 14:22:05 -07002526 KGSL_MEMSTORE_SIZE);
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002527
2528 if (status != 0) {
2529 KGSL_DRV_ERR(device, "kgsl_allocate_contiguous failed %d\n",
2530 status);
2531 goto error_close_mmu;
2532 }
2533
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002534 pm_qos_add_request(&device->pm_qos_req_dma, PM_QOS_CPU_DMA_LATENCY,
2535 PM_QOS_DEFAULT_VALUE);
2536
2537 /* Initialize the snapshot engine */
2538 kgsl_device_snapshot_init(device);
2539
2540 /* Initialize common sysfs entries */
2541 kgsl_pwrctrl_init_sysfs(device);
2542
2543 return 0;
2544
2545error_close_mmu:
2546 kgsl_mmu_close(device);
2547error_dest_work_q:
2548 destroy_workqueue(device->work_queue);
2549 device->work_queue = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002550error_pwrctrl_close:
2551 kgsl_pwrctrl_close(device);
2552error:
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002553 _unregister_device(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002554 return status;
2555}
2556EXPORT_SYMBOL(kgsl_device_platform_probe);
2557
2558void kgsl_device_platform_remove(struct kgsl_device *device)
2559{
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002560 kgsl_device_snapshot_close(device);
2561
2562 kgsl_cffdump_close(device->id);
2563 kgsl_pwrctrl_uninit_sysfs(device);
2564
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002565 pm_qos_remove_request(&device->pm_qos_req_dma);
2566
2567 idr_destroy(&device->context_idr);
2568
2569 kgsl_sharedmem_free(&device->memstore);
2570
2571 kgsl_mmu_close(device);
2572
2573 if (device->work_queue) {
2574 destroy_workqueue(device->work_queue);
2575 device->work_queue = NULL;
2576 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002577 kgsl_pwrctrl_close(device);
2578
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002579 _unregister_device(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002580}
2581EXPORT_SYMBOL(kgsl_device_platform_remove);
2582
2583static int __devinit
2584kgsl_ptdata_init(void)
2585{
Jordan Crouse6d76c4d2012-03-26 09:50:43 -06002586 kgsl_driver.ptpool = kgsl_mmu_ptpool_init(kgsl_pagetable_count);
2587
Shubhraprakash Das767fdda2011-08-15 15:49:45 -06002588 if (!kgsl_driver.ptpool)
2589 return -ENOMEM;
2590 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002591}
2592
2593static void kgsl_core_exit(void)
2594{
Ranjhith Kalisamy4ad59e92012-05-31 19:15:11 +05302595 kgsl_mmu_ptpool_destroy(kgsl_driver.ptpool);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -06002596 kgsl_driver.ptpool = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002597
Ranjhith Kalisamydad9df52012-06-01 17:05:13 +05302598 kgsl_drm_exit();
2599 kgsl_cffdump_destroy();
2600 kgsl_core_debugfs_close();
Ranjhith Kalisamydad9df52012-06-01 17:05:13 +05302601
Harsh Vardhan Dwivediefa6b012012-06-15 13:02:27 -06002602 /*
2603 * We call kgsl_sharedmem_uninit_sysfs() and device_unregister()
2604 * only if kgsl_driver.virtdev has been populated.
2605 * We check at least one member of kgsl_driver.virtdev to
2606 * see if it is not NULL (and thus, has been populated).
2607 */
2608 if (kgsl_driver.virtdev.class) {
2609 kgsl_sharedmem_uninit_sysfs();
2610 device_unregister(&kgsl_driver.virtdev);
2611 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002612
2613 if (kgsl_driver.class) {
2614 class_destroy(kgsl_driver.class);
2615 kgsl_driver.class = NULL;
2616 }
2617
Ranjhith Kalisamydad9df52012-06-01 17:05:13 +05302618 unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002619}
2620
2621static int __init kgsl_core_init(void)
2622{
2623 int result = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002624 /* alloc major and minor device numbers */
2625 result = alloc_chrdev_region(&kgsl_driver.major, 0, KGSL_DEVICE_MAX,
2626 KGSL_NAME);
2627 if (result < 0) {
2628 KGSL_CORE_ERR("alloc_chrdev_region failed err = %d\n", result);
2629 goto err;
2630 }
2631
2632 cdev_init(&kgsl_driver.cdev, &kgsl_fops);
2633 kgsl_driver.cdev.owner = THIS_MODULE;
2634 kgsl_driver.cdev.ops = &kgsl_fops;
2635 result = cdev_add(&kgsl_driver.cdev, MKDEV(MAJOR(kgsl_driver.major), 0),
2636 KGSL_DEVICE_MAX);
2637
2638 if (result) {
2639 KGSL_CORE_ERR("kgsl: cdev_add() failed, dev_num= %d,"
2640 " result= %d\n", kgsl_driver.major, result);
2641 goto err;
2642 }
2643
2644 kgsl_driver.class = class_create(THIS_MODULE, KGSL_NAME);
2645
2646 if (IS_ERR(kgsl_driver.class)) {
2647 result = PTR_ERR(kgsl_driver.class);
2648 KGSL_CORE_ERR("failed to create class %s\n", KGSL_NAME);
2649 goto err;
2650 }
2651
2652 /* Make a virtual device for managing core related things
2653 in sysfs */
2654 kgsl_driver.virtdev.class = kgsl_driver.class;
2655 dev_set_name(&kgsl_driver.virtdev, "kgsl");
2656 result = device_register(&kgsl_driver.virtdev);
2657 if (result) {
2658 KGSL_CORE_ERR("driver_register failed\n");
2659 goto err;
2660 }
2661
2662 /* Make kobjects in the virtual device for storing statistics */
2663
2664 kgsl_driver.ptkobj =
2665 kobject_create_and_add("pagetables",
2666 &kgsl_driver.virtdev.kobj);
2667
2668 kgsl_driver.prockobj =
2669 kobject_create_and_add("proc",
2670 &kgsl_driver.virtdev.kobj);
2671
2672 kgsl_core_debugfs_init();
2673
2674 kgsl_sharedmem_init_sysfs();
2675 kgsl_cffdump_init();
2676
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002677 INIT_LIST_HEAD(&kgsl_driver.process_list);
2678
Shubhraprakash Das767fdda2011-08-15 15:49:45 -06002679 INIT_LIST_HEAD(&kgsl_driver.pagetable_list);
2680
2681 kgsl_mmu_set_mmutype(ksgl_mmu_type);
2682
2683 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_get_mmutype()) {
2684 result = kgsl_ptdata_init();
2685 if (result)
2686 goto err;
2687 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002688
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002689 return 0;
2690
2691err:
2692 kgsl_core_exit();
2693 return result;
2694}
2695
2696module_init(kgsl_core_init);
2697module_exit(kgsl_core_exit);
2698
2699MODULE_AUTHOR("Qualcomm Innovation Center, Inc.");
2700MODULE_DESCRIPTION("MSM GPU driver");
2701MODULE_LICENSE("GPL");