/* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/fb.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/android_pmem.h>
#include <linux/vmalloc.h>
#include <linux/pm_runtime.h>
#include <linux/genlock.h>
#include <linux/rbtree.h>
#include <linux/ashmem.h>
#include <linux/major.h>
#include <linux/msm_ion.h>
#include <linux/io.h>
#include <mach/socinfo.h>

#include "kgsl.h"
#include "kgsl_debugfs.h"
#include "kgsl_cffdump.h"
#include "kgsl_log.h"
#include "kgsl_sharedmem.h"
#include "kgsl_device.h"
#include "kgsl_trace.h"
#include "kgsl_sync.h"

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "kgsl."

static int kgsl_pagetable_count = KGSL_PAGETABLE_COUNT;
static char *ksgl_mmu_type;
module_param_named(ptcount, kgsl_pagetable_count, int, 0);
MODULE_PARM_DESC(kgsl_pagetable_count,
"Minimum number of pagetables for KGSL to allocate at initialization time");
module_param_named(mmutype, ksgl_mmu_type, charp, 0);
MODULE_PARM_DESC(ksgl_mmu_type,
"Type of MMU to be used for graphics. Valid values are 'iommu' or 'gpummu' or 'nommu'");

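/*
 * Illustrative note (not part of the original driver): because of the
 * MODULE_PARAM_PREFIX above, these parameters are set on the kernel
 * command line (or at module load) as, for example:
 *
 *	kgsl.ptcount=16 kgsl.mmutype=iommu
 *
 * The values shown here are hypothetical; the valid mmutype strings are
 * those listed in the MODULE_PARM_DESC above.
 */
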
static struct ion_client *kgsl_ion_client;

/**
 * kgsl_add_event - Add a new timestamp event for the KGSL device
 * @device - KGSL device for the new event
 * @id - context id the event applies to, or KGSL_MEMSTORE_GLOBAL
 * @ts - the timestamp to trigger the event on
 * @cb - callback function to call when the timestamp expires
 * @priv - private data for the specific event type
 * @owner - driver instance that owns this event
 *
 * @returns - 0 on success or error code on failure
 */

int kgsl_add_event(struct kgsl_device *device, u32 id, u32 ts,
	void (*cb)(struct kgsl_device *, void *, u32, u32), void *priv,
	void *owner)
{
	struct kgsl_event *event;
	struct list_head *n;
	unsigned int cur_ts;
	struct kgsl_context *context = NULL;

	if (cb == NULL)
		return -EINVAL;

	if (id != KGSL_MEMSTORE_GLOBAL) {
		context = idr_find(&device->context_idr, id);
		if (context == NULL)
			return -EINVAL;
	}
	cur_ts = kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED);

	/* Check to see if the requested timestamp has already fired */

	if (timestamp_cmp(cur_ts, ts) >= 0) {
		cb(device, priv, id, cur_ts);
		return 0;
	}

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (event == NULL)
		return -ENOMEM;

	event->context = context;
	event->timestamp = ts;
	event->priv = priv;
	event->func = cb;
	event->owner = owner;

	/* inc refcount to avoid race conditions in cleanup */
	if (context)
		kgsl_context_get(context);

	/*
	 * Add the event in order to the list. Order is by context id
	 * first and then by timestamp for that context.
	 */

	for (n = device->events.next; n != &device->events; n = n->next) {
		struct kgsl_event *e =
			list_entry(n, struct kgsl_event, list);

		if (e->context != context)
			continue;

		if (timestamp_cmp(e->timestamp, ts) > 0) {
			list_add(&event->list, n->prev);
			break;
		}
	}

	if (n == &device->events)
		list_add_tail(&event->list, &device->events);

	queue_work(device->work_queue, &device->ts_expired_ws);
	return 0;
}
EXPORT_SYMBOL(kgsl_add_event);

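/*
 * Illustrative sketch (not part of the original driver): a caller that
 * wants to run work when a context timestamp retires registers a
 * callback roughly like this. The callback name and its private data
 * type are hypothetical, used only for the example.
 *
 *	static void example_ts_cb(struct kgsl_device *device, void *priv,
 *				  u32 id, u32 timestamp)
 *	{
 *		struct example_state *state = priv;
 *		// timestamp has retired on context 'id'
 *		complete(&state->done);
 *	}
 *
 *	ret = kgsl_add_event(device, context->id, ts, example_ts_cb,
 *			     state, owner);
 *
 * If 'ts' has already retired, the callback is invoked synchronously
 * before kgsl_add_event() returns.
 */
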
/**
 * kgsl_cancel_events_ctxt - Cancel all events for a context
 * @device - KGSL device for the events to cancel
 * @context - context whose events we want to cancel
 *
 */
static void kgsl_cancel_events_ctxt(struct kgsl_device *device,
	struct kgsl_context *context)
{
	struct kgsl_event *event, *event_tmp;
	unsigned int id, cur;

	cur = kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED);
	id = context->id;

	list_for_each_entry_safe(event, event_tmp, &device->events, list) {
		if (event->context != context)
			continue;

		/*
		 * "cancel" the events by calling their callback.
		 * Currently, events are used for lock and memory
		 * management, so if the process is dying the right
		 * thing to do is release or free.
		 */
		if (event->func)
			event->func(device, event->priv, id, cur);

		kgsl_context_put(context);
		list_del(&event->list);
		kfree(event);
	}
}

/**
 * kgsl_cancel_events - Cancel all events for a process
 * @device - KGSL device for the events to cancel
 * @owner - driver instance that owns the events to cancel
 *
 */
void kgsl_cancel_events(struct kgsl_device *device,
	void *owner)
{
	struct kgsl_event *event, *event_tmp;
	unsigned int id, cur;

	list_for_each_entry_safe(event, event_tmp, &device->events, list) {
		if (event->owner != owner)
			continue;

		cur = kgsl_readtimestamp(device, event->context,
			KGSL_TIMESTAMP_RETIRED);

		id = event->context ? event->context->id : KGSL_MEMSTORE_GLOBAL;
		/*
		 * "cancel" the events by calling their callback.
		 * Currently, events are used for lock and memory
		 * management, so if the process is dying the right
		 * thing to do is release or free.
		 */
		if (event->func)
			event->func(device, event->priv, id, cur);

		if (event->context)
			kgsl_context_put(event->context);

		list_del(&event->list);
		kfree(event);
	}
}
EXPORT_SYMBOL(kgsl_cancel_events);

/* kgsl_get_mem_entry - get the mem_entry structure for the specified object
 * @device - Pointer to the device structure
 * @ptbase - the pagetable base of the object
 * @gpuaddr - the GPU address of the object
 * @size - Size of the region to search
 */

struct kgsl_mem_entry *kgsl_get_mem_entry(struct kgsl_device *device,
	unsigned int ptbase, unsigned int gpuaddr, unsigned int size)
{
	struct kgsl_process_private *priv;
	struct kgsl_mem_entry *entry;

	mutex_lock(&kgsl_driver.process_mutex);

	list_for_each_entry(priv, &kgsl_driver.process_list, list) {
		if (!kgsl_mmu_pt_equal(&device->mmu, priv->pagetable, ptbase))
			continue;
		spin_lock(&priv->mem_lock);
		entry = kgsl_sharedmem_find_region(priv, gpuaddr, size);

		if (entry) {
			spin_unlock(&priv->mem_lock);
			mutex_unlock(&kgsl_driver.process_mutex);
			return entry;
		}
		spin_unlock(&priv->mem_lock);
	}
	mutex_unlock(&kgsl_driver.process_mutex);

	return NULL;
}
EXPORT_SYMBOL(kgsl_get_mem_entry);

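/*
 * Illustrative sketch (not part of the original driver): fault handling
 * or snapshot code elsewhere in KGSL can translate a faulting GPU
 * address back to the owning allocation like this. 'ptbase' would come
 * from the MMU fault registers; the variable names are hypothetical.
 *
 *	struct kgsl_mem_entry *entry =
 *		kgsl_get_mem_entry(device, ptbase, faultaddr, 1);
 *	if (entry)
 *		pr_info("fault inside allocation of size %d\n",
 *			entry->memdesc.size);
 */
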
static inline struct kgsl_mem_entry *
kgsl_mem_entry_create(void)
{
	struct kgsl_mem_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	if (!entry)
		KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*entry));
	else
		kref_init(&entry->refcount);

	return entry;
}

void
kgsl_mem_entry_destroy(struct kref *kref)
{
	struct kgsl_mem_entry *entry = container_of(kref,
						    struct kgsl_mem_entry,
						    refcount);

	if (entry->memtype != KGSL_MEM_ENTRY_KERNEL)
		kgsl_driver.stats.mapped -= entry->memdesc.size;

	/*
	 * Ion takes care of freeing the sglist for us so
	 * clear the sg before freeing the sharedmem so kgsl_sharedmem_free
	 * doesn't try to free it again
	 */

	if (entry->memtype == KGSL_MEM_ENTRY_ION) {
		entry->memdesc.sg = NULL;
	}

	kgsl_sharedmem_free(&entry->memdesc);

	switch (entry->memtype) {
	case KGSL_MEM_ENTRY_PMEM:
	case KGSL_MEM_ENTRY_ASHMEM:
		if (entry->priv_data)
			fput(entry->priv_data);
		break;
	case KGSL_MEM_ENTRY_ION:
		ion_free(kgsl_ion_client, entry->priv_data);
		break;
	}

	kfree(entry);
}
EXPORT_SYMBOL(kgsl_mem_entry_destroy);

static
void kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry,
				   struct kgsl_process_private *process)
{
	struct rb_node **node;
	struct rb_node *parent = NULL;

	spin_lock(&process->mem_lock);

	node = &process->mem_rb.rb_node;

	while (*node) {
		struct kgsl_mem_entry *cur;

		parent = *node;
		cur = rb_entry(parent, struct kgsl_mem_entry, node);

		if (entry->memdesc.gpuaddr < cur->memdesc.gpuaddr)
			node = &parent->rb_left;
		else
			node = &parent->rb_right;
	}

	rb_link_node(&entry->node, parent, node);
	rb_insert_color(&entry->node, &process->mem_rb);

	spin_unlock(&process->mem_lock);

	entry->priv = process;
}

/* Detach a memory entry from a process and unmap it from the MMU */

static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry)
{
	if (entry == NULL)
		return;

	entry->priv->stats[entry->memtype].cur -= entry->memdesc.size;
	entry->priv = NULL;

	kgsl_mmu_unmap(entry->memdesc.pagetable, &entry->memdesc);

	kgsl_mem_entry_put(entry);
}

/* Allocate a new context id */

static struct kgsl_context *
kgsl_create_context(struct kgsl_device_private *dev_priv)
{
	struct kgsl_context *context;
	int ret, id;

	context = kzalloc(sizeof(*context), GFP_KERNEL);

	if (context == NULL)
		return NULL;

	while (1) {
		if (idr_pre_get(&dev_priv->device->context_idr,
				GFP_KERNEL) == 0) {
			kfree(context);
			return NULL;
		}

		ret = idr_get_new_above(&dev_priv->device->context_idr,
					context, 1, &id);

		if (ret != -EAGAIN)
			break;
	}

	if (ret) {
		kfree(context);
		return NULL;
	}

	/* MAX - 1, there is one memdesc in memstore for device info */
	if (id >= KGSL_MEMSTORE_MAX) {
		KGSL_DRV_ERR(dev_priv->device, "cannot have more than %d "
			     "ctxts due to memstore limitation\n",
			     KGSL_MEMSTORE_MAX);
		idr_remove(&dev_priv->device->context_idr, id);
		kfree(context);
		return NULL;
	}

	kref_init(&context->refcount);
	context->id = id;
	context->dev_priv = dev_priv;

	if (kgsl_sync_timeline_create(context)) {
		idr_remove(&dev_priv->device->context_idr, id);
		kfree(context);
		return NULL;
	}

	return context;
}

/**
 * kgsl_context_detach - Release the "master" context reference
 * @context - The context that will be detached
 *
 * This is called when a context becomes unusable, because userspace
 * has requested that it be destroyed. The context itself may
 * exist a bit longer until its reference count goes to zero.
 * Other code referencing the context can detect that it has been
 * detached because the context id will be set to KGSL_CONTEXT_INVALID.
 */
void
kgsl_context_detach(struct kgsl_context *context)
{
	int id;
	struct kgsl_device *device;
	if (context == NULL)
		return;
	device = context->dev_priv->device;
	trace_kgsl_context_detach(device, context);
	id = context->id;

	if (device->ftbl->drawctxt_destroy)
		device->ftbl->drawctxt_destroy(device, context);
	/* device specific drawctxt_destroy MUST clean up devctxt */
	BUG_ON(context->devctxt);
	/*
	 * Cancel events after the device-specific context is
	 * destroyed, to avoid possibly freeing memory while
	 * it is still in use by the GPU.
	 */
	kgsl_cancel_events_ctxt(device, context);
	idr_remove(&device->context_idr, id);
	context->id = KGSL_CONTEXT_INVALID;
	kgsl_context_put(context);
}

void
kgsl_context_destroy(struct kref *kref)
{
	struct kgsl_context *context = container_of(kref, struct kgsl_context,
						    refcount);
	kgsl_sync_timeline_destroy(context);
	kfree(context);
}

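/*
 * Illustrative sketch (not part of the original driver): code that needs
 * a context beyond the lifetime guaranteed by the idr takes its own
 * reference, mirroring what the waittimestamp ioctl below does.
 *
 *	context = kgsl_find_context(dev_priv, id);
 *	if (context == NULL)
 *		return -EINVAL;
 *	kgsl_context_get(context);
 *	// ... possibly sleep with device->mutex dropped ...
 *	kgsl_context_put(context);
 *
 * Once kgsl_context_detach() has run, the id is KGSL_CONTEXT_INVALID and
 * the final kgsl_context_put() frees the context via kgsl_context_destroy().
 */
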
void kgsl_timestamp_expired(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
						  ts_expired_ws);
	struct kgsl_event *event, *event_tmp;
	uint32_t ts_processed;
	unsigned int id;

	mutex_lock(&device->mutex);

	/* Process expired events */
	list_for_each_entry_safe(event, event_tmp, &device->events, list) {
		ts_processed = kgsl_readtimestamp(device, event->context,
						  KGSL_TIMESTAMP_RETIRED);
		if (timestamp_cmp(ts_processed, event->timestamp) < 0)
			continue;

		id = event->context ? event->context->id : KGSL_MEMSTORE_GLOBAL;

		if (event->func)
			event->func(device, event->priv, id, ts_processed);

		if (event->context)
			kgsl_context_put(event->context);

		list_del(&event->list);
		kfree(event);
	}

	/* Send the next pending event for each context to the device */
	if (device->ftbl->next_event) {
		unsigned int id = KGSL_MEMSTORE_GLOBAL;

		list_for_each_entry(event, &device->events, list) {

			if (!event->context)
				continue;

			if (event->context->id != id) {
				device->ftbl->next_event(device, event);
				id = event->context->id;
			}
		}
	}

	mutex_unlock(&device->mutex);
}
EXPORT_SYMBOL(kgsl_timestamp_expired);

static void kgsl_check_idle_locked(struct kgsl_device *device)
{
	if (device->pwrctrl.nap_allowed == true &&
	    device->state == KGSL_STATE_ACTIVE &&
	    device->requested_state == KGSL_STATE_NONE) {
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
		kgsl_pwrscale_idle(device);
		if (kgsl_pwrctrl_sleep(device) != 0)
			mod_timer(&device->idle_timer,
				  jiffies +
				  device->pwrctrl.interval_timeout);
	}
}

static void kgsl_check_idle(struct kgsl_device *device)
{
	mutex_lock(&device->mutex);
	kgsl_check_idle_locked(device);
	mutex_unlock(&device->mutex);
}

struct kgsl_device *kgsl_get_device(int dev_idx)
{
	int i;
	struct kgsl_device *ret = NULL;

	mutex_lock(&kgsl_driver.devlock);

	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		if (kgsl_driver.devp[i] && kgsl_driver.devp[i]->id == dev_idx) {
			ret = kgsl_driver.devp[i];
			break;
		}
	}

	mutex_unlock(&kgsl_driver.devlock);
	return ret;
}
EXPORT_SYMBOL(kgsl_get_device);

static struct kgsl_device *kgsl_get_minor(int minor)
{
	struct kgsl_device *ret = NULL;

	if (minor < 0 || minor >= KGSL_DEVICE_MAX)
		return NULL;

	mutex_lock(&kgsl_driver.devlock);
	ret = kgsl_driver.devp[minor];
	mutex_unlock(&kgsl_driver.devlock);

	return ret;
}

int kgsl_register_ts_notifier(struct kgsl_device *device,
			      struct notifier_block *nb)
{
	BUG_ON(device == NULL);
	return atomic_notifier_chain_register(&device->ts_notifier_list,
					      nb);
}
EXPORT_SYMBOL(kgsl_register_ts_notifier);

int kgsl_unregister_ts_notifier(struct kgsl_device *device,
				struct notifier_block *nb)
{
	BUG_ON(device == NULL);
	return atomic_notifier_chain_unregister(&device->ts_notifier_list,
						nb);
}
EXPORT_SYMBOL(kgsl_unregister_ts_notifier);

int kgsl_check_timestamp(struct kgsl_device *device,
	struct kgsl_context *context, unsigned int timestamp)
{
	unsigned int ts_processed;

	ts_processed = kgsl_readtimestamp(device, context,
					  KGSL_TIMESTAMP_RETIRED);

	return (timestamp_cmp(ts_processed, timestamp) >= 0);
}
EXPORT_SYMBOL(kgsl_check_timestamp);

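/*
 * Illustrative note (not part of the original driver): timestamp_cmp()
 * is assumed to perform a wrap-around-safe comparison, which is why the
 * callers above test retirement with it instead of a plain '>='. A
 * minimal sketch of such a comparison, under that assumption and with a
 * hypothetical name, would be:
 *
 *	// >0 if a is newer than b, 0 if equal, <0 if older
 *	static inline int example_ts_cmp(unsigned int a, unsigned int b)
 *	{
 *		return (int)(a - b);
 *	}
 */
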
static int kgsl_suspend_device(struct kgsl_device *device, pm_message_t state)
{
	int status = -EINVAL;
	unsigned int nap_allowed_saved;
	struct kgsl_pwrscale_policy *policy_saved;

	if (!device)
		return -EINVAL;

	KGSL_PWR_WARN(device, "suspend start\n");

	mutex_lock(&device->mutex);
	nap_allowed_saved = device->pwrctrl.nap_allowed;
	device->pwrctrl.nap_allowed = false;
	policy_saved = device->pwrscale.policy;
	device->pwrscale.policy = NULL;
	kgsl_pwrctrl_request_state(device, KGSL_STATE_SUSPEND);
	/* Make sure no user process is waiting for a timestamp
	 * before suspending */
	if (device->active_cnt != 0) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->suspend_gate);
		mutex_lock(&device->mutex);
	}
	/* Don't let the timer wake us during suspended sleep. */
	del_timer_sync(&device->idle_timer);
	switch (device->state) {
	case KGSL_STATE_INIT:
		break;
	case KGSL_STATE_ACTIVE:
		/* Wait for the device to become idle */
		device->ftbl->idle(device);
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
		/* Get the completion ready to be waited upon. */
		INIT_COMPLETION(device->hwaccess_gate);
		device->ftbl->suspend_context(device);
		device->ftbl->stop(device);
		pm_qos_update_request(&device->pm_qos_req_dma,
				      PM_QOS_DEFAULT_VALUE);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SUSPEND);
		break;
	case KGSL_STATE_SLUMBER:
		INIT_COMPLETION(device->hwaccess_gate);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SUSPEND);
		break;
	default:
		KGSL_PWR_ERR(device, "suspend fail, device %d\n",
			     device->id);
		goto end;
	}
	kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
	device->pwrctrl.nap_allowed = nap_allowed_saved;
	device->pwrscale.policy = policy_saved;
	status = 0;

end:
	mutex_unlock(&device->mutex);
	KGSL_PWR_WARN(device, "suspend end\n");
	return status;
}

static int kgsl_resume_device(struct kgsl_device *device)
{
	int status = -EINVAL;

	if (!device)
		return -EINVAL;

	KGSL_PWR_WARN(device, "resume start\n");
	mutex_lock(&device->mutex);
	if (device->state == KGSL_STATE_SUSPEND) {
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
		status = 0;
		complete_all(&device->hwaccess_gate);
	}
	kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);

	mutex_unlock(&device->mutex);
	KGSL_PWR_WARN(device, "resume end\n");
	return status;
}

static int kgsl_suspend(struct device *dev)
{

	pm_message_t arg = {0};
	struct kgsl_device *device = dev_get_drvdata(dev);
	return kgsl_suspend_device(device, arg);
}

static int kgsl_resume(struct device *dev)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	return kgsl_resume_device(device);
}

static int kgsl_runtime_suspend(struct device *dev)
{
	return 0;
}

static int kgsl_runtime_resume(struct device *dev)
{
	return 0;
}

const struct dev_pm_ops kgsl_pm_ops = {
	.suspend = kgsl_suspend,
	.resume = kgsl_resume,
	.runtime_suspend = kgsl_runtime_suspend,
	.runtime_resume = kgsl_runtime_resume,
};
EXPORT_SYMBOL(kgsl_pm_ops);

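/*
 * Illustrative sketch (not part of the original driver): the platform
 * driver that probes a KGSL device points its .pm field at kgsl_pm_ops
 * so the suspend/resume hooks above are called by the PM core. The
 * driver and callback names below are hypothetical.
 *
 *	static struct platform_driver example_gpu_driver = {
 *		.probe  = example_gpu_probe,
 *		.remove = example_gpu_remove,
 *		.driver = {
 *			.name  = "example-kgsl-3d0",
 *			.owner = THIS_MODULE,
 *			.pm    = &kgsl_pm_ops,
 *		},
 *	};
 */
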
void kgsl_early_suspend_driver(struct early_suspend *h)
{
	struct kgsl_device *device = container_of(h,
					struct kgsl_device, display_off);
	KGSL_PWR_WARN(device, "early suspend start\n");
	mutex_lock(&device->mutex);
	device->pwrctrl.restore_slumber = true;
	kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
	kgsl_pwrctrl_sleep(device);
	mutex_unlock(&device->mutex);
	KGSL_PWR_WARN(device, "early suspend end\n");
}
EXPORT_SYMBOL(kgsl_early_suspend_driver);

int kgsl_suspend_driver(struct platform_device *pdev,
			pm_message_t state)
{
	struct kgsl_device *device = dev_get_drvdata(&pdev->dev);
	return kgsl_suspend_device(device, state);
}
EXPORT_SYMBOL(kgsl_suspend_driver);

int kgsl_resume_driver(struct platform_device *pdev)
{
	struct kgsl_device *device = dev_get_drvdata(&pdev->dev);
	return kgsl_resume_device(device);
}
EXPORT_SYMBOL(kgsl_resume_driver);

void kgsl_late_resume_driver(struct early_suspend *h)
{
	struct kgsl_device *device = container_of(h,
					struct kgsl_device, display_off);
	KGSL_PWR_WARN(device, "late resume start\n");
	mutex_lock(&device->mutex);
	device->pwrctrl.restore_slumber = false;
	if (device->pwrscale.policy == NULL)
		kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_TURBO);
	kgsl_pwrctrl_wake(device);
	mutex_unlock(&device->mutex);
	kgsl_check_idle(device);
	KGSL_PWR_WARN(device, "late resume end\n");
}
EXPORT_SYMBOL(kgsl_late_resume_driver);

/* file operations */
static struct kgsl_process_private *
kgsl_get_process_private(struct kgsl_device_private *cur_dev_priv)
{
	struct kgsl_process_private *private;

	mutex_lock(&kgsl_driver.process_mutex);
	list_for_each_entry(private, &kgsl_driver.process_list, list) {
		if (private->pid == task_tgid_nr(current)) {
			private->refcnt++;
			goto out;
		}
	}

	/* no existing process private found for this dev_priv, create one */
	private = kzalloc(sizeof(struct kgsl_process_private), GFP_KERNEL);
	if (private == NULL) {
		KGSL_DRV_ERR(cur_dev_priv->device, "kzalloc(%d) failed\n",
			     sizeof(struct kgsl_process_private));
		goto out;
	}

	spin_lock_init(&private->mem_lock);
	private->refcnt = 1;
	private->pid = task_tgid_nr(current);
	private->mem_rb = RB_ROOT;

	if (kgsl_mmu_enabled()) {
		unsigned long pt_name;

		pt_name = task_tgid_nr(current);
		private->pagetable = kgsl_mmu_getpagetable(pt_name);
		if (private->pagetable == NULL) {
			kfree(private);
			private = NULL;
			goto out;
		}
	}

	list_add(&private->list, &kgsl_driver.process_list);

	kgsl_process_init_sysfs(private);
	kgsl_process_init_debugfs(private);

out:
	mutex_unlock(&kgsl_driver.process_mutex);
	return private;
}

static void
kgsl_put_process_private(struct kgsl_device *device,
			 struct kgsl_process_private *private)
{
	struct kgsl_mem_entry *entry = NULL;
	struct rb_node *node;

	if (!private)
		return;

	mutex_lock(&kgsl_driver.process_mutex);

	if (--private->refcnt)
		goto unlock;

	kgsl_process_uninit_sysfs(private);
	debugfs_remove_recursive(private->debug_root);

	list_del(&private->list);

	for (node = rb_first(&private->mem_rb); node; ) {
		entry = rb_entry(node, struct kgsl_mem_entry, node);
		node = rb_next(&entry->node);

		rb_erase(&entry->node, &private->mem_rb);
		kgsl_mem_entry_detach_process(entry);
	}
	kgsl_mmu_putpagetable(private->pagetable);
	kfree(private);
unlock:
	mutex_unlock(&kgsl_driver.process_mutex);
}

static int kgsl_release(struct inode *inodep, struct file *filep)
{
	int result = 0;
	struct kgsl_device_private *dev_priv = filep->private_data;
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_device *device = dev_priv->device;
	struct kgsl_context *context;
	int next = 0;

	filep->private_data = NULL;

	mutex_lock(&device->mutex);
	kgsl_check_suspended(device);

	while (1) {
		context = idr_get_next(&device->context_idr, &next);
		if (context == NULL)
			break;

		if (context->dev_priv == dev_priv)
			kgsl_context_detach(context);

		next = next + 1;
	}
	/*
	 * Clean up any to-be-freed entries that belong to this
	 * process and this device. This is done after the contexts
	 * are destroyed to avoid possibly freeing memory while
	 * it is still in use by the GPU.
	 */
	kgsl_cancel_events(device, dev_priv);

	device->open_count--;
	if (device->open_count == 0) {
		result = device->ftbl->stop(device);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
	}

	mutex_unlock(&device->mutex);
	kfree(dev_priv);

	kgsl_put_process_private(device, private);

	pm_runtime_put(device->parentdev);
	return result;
}

static int kgsl_open(struct inode *inodep, struct file *filep)
{
	int result;
	struct kgsl_device_private *dev_priv;
	struct kgsl_device *device;
	unsigned int minor = iminor(inodep);

	device = kgsl_get_minor(minor);
	BUG_ON(device == NULL);

	if (filep->f_flags & O_EXCL) {
		KGSL_DRV_ERR(device, "O_EXCL not allowed\n");
		return -EBUSY;
	}

	result = pm_runtime_get_sync(device->parentdev);
	if (result < 0) {
		KGSL_DRV_ERR(device,
			"Runtime PM: Unable to wake up the device, rc = %d\n",
			result);
		return result;
	}
	result = 0;

	dev_priv = kzalloc(sizeof(struct kgsl_device_private), GFP_KERNEL);
	if (dev_priv == NULL) {
		KGSL_DRV_ERR(device, "kzalloc failed(%d)\n",
			     sizeof(struct kgsl_device_private));
		result = -ENOMEM;
		goto err_pmruntime;
	}

	dev_priv->device = device;
	filep->private_data = dev_priv;

	/* Get file (per process) private struct */
	dev_priv->process_priv = kgsl_get_process_private(dev_priv);
	if (dev_priv->process_priv == NULL) {
		result = -ENOMEM;
		goto err_freedevpriv;
	}

	mutex_lock(&device->mutex);
	kgsl_check_suspended(device);

	if (device->open_count == 0) {
		kgsl_sharedmem_set(&device->memstore, 0, 0,
				   device->memstore.size);

		result = device->ftbl->start(device, true);

		if (result) {
			mutex_unlock(&device->mutex);
			goto err_putprocess;
		}
		kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
	}
	device->open_count++;
	mutex_unlock(&device->mutex);

	KGSL_DRV_INFO(device, "Initialized %s: mmu=%s pagetable_count=%d\n",
		      device->name, kgsl_mmu_enabled() ? "on" : "off",
		      kgsl_pagetable_count);

	return result;

err_putprocess:
	kgsl_put_process_private(device, dev_priv->process_priv);
err_freedevpriv:
	filep->private_data = NULL;
	kfree(dev_priv);
err_pmruntime:
	pm_runtime_put(device->parentdev);
	return result;
}

/* call with private->mem_lock locked */
struct kgsl_mem_entry *
kgsl_sharedmem_find_region(struct kgsl_process_private *private,
	unsigned int gpuaddr, size_t size)
{
	struct rb_node *node = private->mem_rb.rb_node;

	if (!kgsl_mmu_gpuaddr_in_range(gpuaddr))
		return NULL;

	while (node != NULL) {
		struct kgsl_mem_entry *entry;

		entry = rb_entry(node, struct kgsl_mem_entry, node);

		if (kgsl_gpuaddr_in_memdesc(&entry->memdesc, gpuaddr, size))
			return entry;

		if (gpuaddr < entry->memdesc.gpuaddr)
			node = node->rb_left;
		else if (gpuaddr >=
			 (entry->memdesc.gpuaddr + entry->memdesc.size))
			node = node->rb_right;
		else {
			return NULL;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(kgsl_sharedmem_find_region);

/* call with private->mem_lock locked */
static inline struct kgsl_mem_entry *
kgsl_sharedmem_find(struct kgsl_process_private *private, unsigned int gpuaddr)
{
	return kgsl_sharedmem_find_region(private, gpuaddr, 1);
}

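/*
 * Illustrative sketch (not part of the original driver): callers that
 * map a user-supplied GPU address to its backing entry must hold the
 * per-process mem_lock around the rb-tree walk, as the ioctl handlers
 * below do. For example:
 *
 *	spin_lock(&private->mem_lock);
 *	entry = kgsl_sharedmem_find_region(private, gpuaddr, size);
 *	spin_unlock(&private->mem_lock);
 *	if (entry == NULL)
 *		return -EINVAL;
 */
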
/* call all ioctl sub functions with driver locked */
static long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv,
					  unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_device_getproperty *param = data;

	switch (param->type) {
	case KGSL_PROP_VERSION:
	{
		struct kgsl_version version;
		if (param->sizebytes != sizeof(version)) {
			result = -EINVAL;
			break;
		}

		version.drv_major = KGSL_VERSION_MAJOR;
		version.drv_minor = KGSL_VERSION_MINOR;
		version.dev_major = dev_priv->device->ver_major;
		version.dev_minor = dev_priv->device->ver_minor;

		if (copy_to_user(param->value, &version, sizeof(version)))
			result = -EFAULT;

		break;
	}
	case KGSL_PROP_GPU_RESET_STAT:
	{
		/* Return reset status of given context and clear it */
		uint32_t id;
		struct kgsl_context *context;

		if (param->sizebytes != sizeof(unsigned int)) {
			result = -EINVAL;
			break;
		}
		/* We expect the value passed in to contain the context id */
		if (copy_from_user(&id, param->value,
				   sizeof(unsigned int))) {
			result = -EFAULT;
			break;
		}
		context = kgsl_find_context(dev_priv, id);
		if (!context) {
			result = -EINVAL;
			break;
		}
		/*
		 * Copy the reset status to value which also serves as
		 * the out parameter
		 */
		if (copy_to_user(param->value, &(context->reset_status),
				 sizeof(unsigned int))) {
			result = -EFAULT;
			break;
		}
		/* Clear reset status once it has been queried */
		context->reset_status = KGSL_CTX_STAT_NO_ERROR;
		break;
	}
	default:
		result = dev_priv->device->ftbl->getproperty(
			dev_priv->device, param->type,
			param->value, param->sizebytes);
	}


	return result;
}

static long kgsl_ioctl_device_setproperty(struct kgsl_device_private *dev_priv,
					  unsigned int cmd, void *data)
{
	int result = 0;
	/* The getproperty struct is reused for setproperty too */
	struct kgsl_device_getproperty *param = data;

	if (dev_priv->device->ftbl->setproperty)
		result = dev_priv->device->ftbl->setproperty(
			dev_priv->device, param->type,
			param->value, param->sizebytes);

	return result;
}

static long _device_waittimestamp(struct kgsl_device_private *dev_priv,
	struct kgsl_context *context,
	unsigned int timestamp,
	unsigned int timeout)
{
	int result = 0;
	struct kgsl_device *device = dev_priv->device;
	unsigned int context_id = context ? context->id : KGSL_MEMSTORE_GLOBAL;

	/* Set the active count so that suspend doesn't do the wrong thing */

	device->active_cnt++;

	trace_kgsl_waittimestamp_entry(device, context_id,
				       kgsl_readtimestamp(device, context,
							  KGSL_TIMESTAMP_RETIRED),
				       timestamp, timeout);

	result = device->ftbl->waittimestamp(dev_priv->device,
					     context, timestamp, timeout);

	trace_kgsl_waittimestamp_exit(device,
				      kgsl_readtimestamp(device, context,
							 KGSL_TIMESTAMP_RETIRED),
				      result);

	/* Fire off any pending suspend operations that are in flight */

	INIT_COMPLETION(dev_priv->device->suspend_gate);
	dev_priv->device->active_cnt--;
	complete(&dev_priv->device->suspend_gate);

	return result;
}

static long kgsl_ioctl_device_waittimestamp(struct kgsl_device_private
					    *dev_priv, unsigned int cmd,
					    void *data)
{
	struct kgsl_device_waittimestamp *param = data;

	return _device_waittimestamp(dev_priv, NULL,
				     param->timestamp, param->timeout);
}

static long kgsl_ioctl_device_waittimestamp_ctxtid(struct kgsl_device_private
						   *dev_priv, unsigned int cmd,
						   void *data)
{
	struct kgsl_device_waittimestamp_ctxtid *param = data;
	struct kgsl_context *context;
	int result;

	context = kgsl_find_context(dev_priv, param->context_id);
	if (context == NULL) {
		KGSL_DRV_ERR(dev_priv->device, "invalid context_id %d\n",
			     param->context_id);
		return -EINVAL;
	}
	/*
	 * A reference count is needed here, because waittimestamp may
	 * block with the device mutex unlocked and userspace could
	 * request that the context be destroyed during that time.
	 */
	kgsl_context_get(context);
	result = _device_waittimestamp(dev_priv, context,
				       param->timestamp, param->timeout);
	kgsl_context_put(context);
	return result;
}

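/*
 * Illustrative sketch (not part of the original driver): from userspace
 * the wait above is reached through the KGSL character device. The
 * fields are set by name; 'fd' and 'ctx_id' are hypothetical variables.
 *
 *	struct kgsl_device_waittimestamp_ctxtid wait = {
 *		.context_id = ctx_id,
 *		.timestamp  = ts,
 *		.timeout    = 1000,
 *	};
 *	ret = ioctl(fd, IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID, &wait);
 */
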
static long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
				      unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_ringbuffer_issueibcmds *param = data;
	struct kgsl_ibdesc *ibdesc;
	struct kgsl_context *context;

	context = kgsl_find_context(dev_priv, param->drawctxt_id);
	if (context == NULL) {
		result = -EINVAL;
		KGSL_DRV_ERR(dev_priv->device,
			     "invalid context_id %d\n",
			     param->drawctxt_id);
		goto done;
	}

	if (param->flags & KGSL_CONTEXT_SUBMIT_IB_LIST) {
		KGSL_DRV_INFO(dev_priv->device,
			"Using IB list mode for ib submission, numibs: %d\n",
			param->numibs);
		if (!param->numibs) {
			KGSL_DRV_ERR(dev_priv->device,
				     "Invalid numibs as parameter: %d\n",
				     param->numibs);
			result = -EINVAL;
			goto done;
		}

		/*
		 * Put a reasonable upper limit on the number of IBs that
		 * can be submitted
		 */

		if (param->numibs > 10000) {
			KGSL_DRV_ERR(dev_priv->device,
				"Too many IBs submitted. count: %d max 10000\n",
				param->numibs);
			result = -EINVAL;
			goto done;
		}

		ibdesc = kzalloc(sizeof(struct kgsl_ibdesc) * param->numibs,
				 GFP_KERNEL);
		if (!ibdesc) {
			KGSL_MEM_ERR(dev_priv->device,
				     "kzalloc(%d) failed\n",
				     sizeof(struct kgsl_ibdesc) * param->numibs);
			result = -ENOMEM;
			goto done;
		}

		if (copy_from_user(ibdesc, (void *)param->ibdesc_addr,
				   sizeof(struct kgsl_ibdesc) * param->numibs)) {
			result = -EFAULT;
			KGSL_DRV_ERR(dev_priv->device,
				     "copy_from_user failed\n");
			goto free_ibdesc;
		}
	} else {
		KGSL_DRV_INFO(dev_priv->device,
			"Using single IB submission mode for ib submission\n");
		/* If the user space driver is still using the old mode of
		 * submitting a single ib then we need to support that as well */
		ibdesc = kzalloc(sizeof(struct kgsl_ibdesc), GFP_KERNEL);
		if (!ibdesc) {
			KGSL_MEM_ERR(dev_priv->device,
				     "kzalloc(%d) failed\n",
				     sizeof(struct kgsl_ibdesc));
			result = -ENOMEM;
			goto done;
		}
		ibdesc[0].gpuaddr = param->ibdesc_addr;
		ibdesc[0].sizedwords = param->numibs;
		param->numibs = 1;
	}

	result = dev_priv->device->ftbl->issueibcmds(dev_priv,
						     context,
						     ibdesc,
						     param->numibs,
						     &param->timestamp,
						     param->flags);

	trace_kgsl_issueibcmds(dev_priv->device, param, ibdesc, result);

free_ibdesc:
	kfree(ibdesc);
done:

	return result;
}

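/*
 * Illustrative sketch (not part of the original driver): a user space
 * driver submits one or more indirect buffers (IBs) against a draw
 * context with IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS. The buffer variables
 * below are hypothetical; ibdesc_addr points at an array of
 * struct kgsl_ibdesc when KGSL_CONTEXT_SUBMIT_IB_LIST is set.
 *
 *	struct kgsl_ibdesc ibs[1] = {
 *		{ .gpuaddr = ib_gpuaddr, .sizedwords = ib_dwords },
 *	};
 *	struct kgsl_ringbuffer_issueibcmds cmd = {
 *		.drawctxt_id = ctx_id,
 *		.ibdesc_addr = (unsigned long) ibs,
 *		.numibs      = 1,
 *		.flags       = KGSL_CONTEXT_SUBMIT_IB_LIST,
 *	};
 *	ret = ioctl(fd, IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS, &cmd);
 *	// cmd.timestamp now holds the timestamp assigned to this submission
 */
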
static long _cmdstream_readtimestamp(struct kgsl_device_private *dev_priv,
	struct kgsl_context *context, unsigned int type,
	unsigned int *timestamp)
{
	*timestamp = kgsl_readtimestamp(dev_priv->device, context, type);

	trace_kgsl_readtimestamp(dev_priv->device,
				 context ? context->id : KGSL_MEMSTORE_GLOBAL,
				 type, *timestamp);

	return 0;
}

static long kgsl_ioctl_cmdstream_readtimestamp(struct kgsl_device_private
					       *dev_priv, unsigned int cmd,
					       void *data)
{
	struct kgsl_cmdstream_readtimestamp *param = data;

	return _cmdstream_readtimestamp(dev_priv, NULL,
					param->type, &param->timestamp);
}

static long kgsl_ioctl_cmdstream_readtimestamp_ctxtid(struct kgsl_device_private
					       *dev_priv, unsigned int cmd,
					       void *data)
{
	struct kgsl_cmdstream_readtimestamp_ctxtid *param = data;
	struct kgsl_context *context;

	context = kgsl_find_context(dev_priv, param->context_id);
	if (context == NULL) {
		KGSL_DRV_ERR(dev_priv->device, "invalid context_id %d\n",
			     param->context_id);
		return -EINVAL;
	}

	return _cmdstream_readtimestamp(dev_priv, context,
					param->type, &param->timestamp);
}

static void kgsl_freemem_event_cb(struct kgsl_device *device,
				  void *priv, u32 id, u32 timestamp)
{
	struct kgsl_mem_entry *entry = priv;
	spin_lock(&entry->priv->mem_lock);
	rb_erase(&entry->node, &entry->priv->mem_rb);
	spin_unlock(&entry->priv->mem_lock);
	trace_kgsl_mem_timestamp_free(device, entry, id, timestamp, 0);
	kgsl_mem_entry_detach_process(entry);
}

static long _cmdstream_freememontimestamp(struct kgsl_device_private *dev_priv,
	unsigned int gpuaddr, struct kgsl_context *context,
	unsigned int timestamp, unsigned int type)
{
	int result = 0;
	struct kgsl_mem_entry *entry = NULL;
	struct kgsl_device *device = dev_priv->device;
	unsigned int context_id = context ? context->id : KGSL_MEMSTORE_GLOBAL;

	spin_lock(&dev_priv->process_priv->mem_lock);
	entry = kgsl_sharedmem_find(dev_priv->process_priv, gpuaddr);
	spin_unlock(&dev_priv->process_priv->mem_lock);

	if (!entry) {
		KGSL_DRV_ERR(dev_priv->device,
			     "invalid gpuaddr %08x\n", gpuaddr);
		result = -EINVAL;
		goto done;
	}
	trace_kgsl_mem_timestamp_queue(device, entry, context_id,
				       kgsl_readtimestamp(device, context,
							  KGSL_TIMESTAMP_RETIRED),
				       timestamp);
	result = kgsl_add_event(dev_priv->device, context_id, timestamp,
				kgsl_freemem_event_cb, entry, dev_priv);
done:
	return result;
}

static long kgsl_ioctl_cmdstream_freememontimestamp(struct kgsl_device_private
						    *dev_priv, unsigned int cmd,
						    void *data)
{
	struct kgsl_cmdstream_freememontimestamp *param = data;

	return _cmdstream_freememontimestamp(dev_priv, param->gpuaddr,
					     NULL, param->timestamp, param->type);
}

static long kgsl_ioctl_cmdstream_freememontimestamp_ctxtid(
						struct kgsl_device_private
						*dev_priv, unsigned int cmd,
						void *data)
{
	struct kgsl_cmdstream_freememontimestamp_ctxtid *param = data;
	struct kgsl_context *context;

	context = kgsl_find_context(dev_priv, param->context_id);
	if (context == NULL) {
		KGSL_DRV_ERR(dev_priv->device,
			"invalid drawctxt context_id %d\n", param->context_id);
		return -EINVAL;
	}

	return _cmdstream_freememontimestamp(dev_priv, param->gpuaddr,
					     context, param->timestamp,
					     param->type);
}

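/*
 * Illustrative sketch (not part of the original driver): userspace uses
 * the free-on-timestamp ioctl to release a GPU buffer only after the
 * commands that reference it have retired. The field values below are
 * hypothetical.
 *
 *	struct kgsl_cmdstream_freememontimestamp_ctxtid req = {
 *		.context_id = ctx_id,
 *		.gpuaddr    = buf_gpuaddr,
 *		.type       = KGSL_TIMESTAMP_RETIRED,
 *		.timestamp  = last_submit_ts,
 *	};
 *	ret = ioctl(fd, IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID, &req);
 *
 * The kernel side queues kgsl_freemem_event_cb() via kgsl_add_event()
 * and detaches the allocation when the timestamp expires.
 */
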
static long kgsl_ioctl_drawctxt_create(struct kgsl_device_private *dev_priv,
				       unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_drawctxt_create *param = data;
	struct kgsl_context *context = NULL;

	context = kgsl_create_context(dev_priv);

	if (context == NULL) {
		result = -ENOMEM;
		goto done;
	}

	if (dev_priv->device->ftbl->drawctxt_create) {
		result = dev_priv->device->ftbl->drawctxt_create(
			dev_priv->device, dev_priv->process_priv->pagetable,
			context, param->flags);
		if (result)
			goto done;
	}
	trace_kgsl_context_create(dev_priv->device, context, param->flags);
	param->drawctxt_id = context->id;
done:
	if (result && context)
		kgsl_context_detach(context);

	return result;
}

static long kgsl_ioctl_drawctxt_destroy(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_drawctxt_destroy *param = data;
	struct kgsl_context *context;

	context = kgsl_find_context(dev_priv, param->drawctxt_id);

	if (context == NULL) {
		result = -EINVAL;
		goto done;
	}

	kgsl_context_detach(context);
done:
	return result;
}

static long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
				      unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_sharedmem_free *param = data;
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_mem_entry *entry = NULL;

	spin_lock(&private->mem_lock);
	entry = kgsl_sharedmem_find(private, param->gpuaddr);
	if (entry)
		rb_erase(&entry->node, &private->mem_rb);

	spin_unlock(&private->mem_lock);

	if (entry) {
		trace_kgsl_mem_free(entry);
		kgsl_mem_entry_detach_process(entry);
	} else {
		KGSL_CORE_ERR("invalid gpuaddr %08x\n", param->gpuaddr);
		result = -EINVAL;
	}

	return result;
}

static struct vm_area_struct *kgsl_get_vma_from_start_addr(unsigned int addr)
{
	struct vm_area_struct *vma;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	up_read(&current->mm->mmap_sem);
	if (!vma)
		KGSL_CORE_ERR("find_vma(%x) failed\n", addr);

	return vma;
}

static inline int _check_region(unsigned long start, unsigned long size,
				uint64_t len)
{
	uint64_t end = ((uint64_t) start) + size;
	return (end > len);
}

static int kgsl_get_phys_file(int fd, unsigned long *start, unsigned long *len,
			      unsigned long *vstart, struct file **filep)
{
	struct file *fbfile;
	int ret = 0;
	dev_t rdev;
	struct fb_info *info;

	*filep = NULL;
#ifdef CONFIG_ANDROID_PMEM
	if (!get_pmem_file(fd, start, vstart, len, filep))
		return 0;
#endif

	fbfile = fget(fd);
	if (fbfile == NULL) {
		KGSL_CORE_ERR("fget_light failed\n");
		return -1;
	}

	rdev = fbfile->f_dentry->d_inode->i_rdev;
	info = MAJOR(rdev) == FB_MAJOR ? registered_fb[MINOR(rdev)] : NULL;
	if (info) {
		*start = info->fix.smem_start;
		*len = info->fix.smem_len;
		*vstart = (unsigned long)__va(info->fix.smem_start);
		ret = 0;
	} else {
		KGSL_CORE_ERR("framebuffer minor %d not found\n",
			      MINOR(rdev));
		ret = -1;
	}

	fput(fbfile);

	return ret;
}

static int kgsl_setup_phys_file(struct kgsl_mem_entry *entry,
				struct kgsl_pagetable *pagetable,
				unsigned int fd, unsigned int offset,
				size_t size)
{
	int ret;
	unsigned long phys, virt, len;
	struct file *filep;

	ret = kgsl_get_phys_file(fd, &phys, &len, &virt, &filep);
	if (ret)
		return ret;

	ret = -ERANGE;

	if (phys == 0) {
		KGSL_CORE_ERR("kgsl_get_phys_file returned phys=0\n");
		goto err;
	}

	/* Make sure the length of the region, the offset and the desired
	 * size are all page aligned or bail
	 */
	if ((len & ~PAGE_MASK) ||
	    (offset & ~PAGE_MASK) ||
	    (size & ~PAGE_MASK)) {
		KGSL_CORE_ERR("length %lu, offset %u or size %u "
			      "is not page aligned\n",
			      len, offset, size);
		goto err;
	}

	/* The size or offset can never be greater than the PMEM length */
	if (offset >= len || size > len) {
		KGSL_CORE_ERR("offset %u or size %u "
			      "exceeds pmem length %lu\n",
			      offset, size, len);
		goto err;
	}

	/* If size is 0, then adjust it to default to the size of the region
	 * minus the offset. If size isn't zero, then make sure that it will
	 * fit inside of the region.
	 */
	if (size == 0)
		size = len - offset;

	else if (_check_region(offset, size, len))
		goto err;

	entry->priv_data = filep;

	entry->memdesc.pagetable = pagetable;
	entry->memdesc.size = size;
	entry->memdesc.physaddr = phys + offset;
	entry->memdesc.hostptr = (void *) (virt + offset);

	ret = memdesc_sg_phys(&entry->memdesc, phys + offset, size);
	if (ret)
		goto err;

	return 0;
err:
#ifdef CONFIG_ANDROID_PMEM
	put_pmem_file(filep);
#endif
	return ret;
}

static int memdesc_sg_virt(struct kgsl_memdesc *memdesc,
			   void *addr, int size)
{
	int i;
	int sglen = PAGE_ALIGN(size) / PAGE_SIZE;
	unsigned long paddr = (unsigned long) addr;

	memdesc->sg = kgsl_sg_alloc(sglen);

	if (memdesc->sg == NULL)
		return -ENOMEM;

	memdesc->sglen = sglen;
	memdesc->sglen_alloc = sglen;

	sg_init_table(memdesc->sg, sglen);

	spin_lock(&current->mm->page_table_lock);

	for (i = 0; i < sglen; i++, paddr += PAGE_SIZE) {
		struct page *page;
		pmd_t *ppmd;
		pte_t *ppte;
		pgd_t *ppgd = pgd_offset(current->mm, paddr);

		if (pgd_none(*ppgd) || pgd_bad(*ppgd))
			goto err;

		ppmd = pmd_offset(pud_offset(ppgd, paddr), paddr);
		if (pmd_none(*ppmd) || pmd_bad(*ppmd))
1564 goto err;
1565
1566 ppte = pte_offset_map(ppmd, paddr);
1567 if (ppte == NULL)
1568 goto err;
1569
1570 page = pfn_to_page(pte_pfn(*ppte));
1571 if (!page)
1572 goto err;
1573
1574 sg_set_page(&memdesc->sg[i], page, PAGE_SIZE, 0);
1575 pte_unmap(ppte);
1576 }
1577
1578 spin_unlock(&current->mm->page_table_lock);
1579
1580 return 0;
1581
1582err:
1583 spin_unlock(&current->mm->page_table_lock);
Jordan Crousea652a072012-04-06 16:26:33 -06001584 kgsl_sg_free(memdesc->sg, sglen);
Jordan Croused17e9aa2011-10-12 16:57:48 -06001585 memdesc->sg = NULL;
1586
1587 return -EINVAL;
1588}
1589
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001590static int kgsl_setup_hostptr(struct kgsl_mem_entry *entry,
1591 struct kgsl_pagetable *pagetable,
1592 void *hostptr, unsigned int offset,
1593 size_t size)
1594{
1595 struct vm_area_struct *vma;
1596 unsigned int len;
1597
1598 down_read(&current->mm->mmap_sem);
1599 vma = find_vma(current->mm, (unsigned int) hostptr);
1600 up_read(&current->mm->mmap_sem);
1601
1602 if (!vma) {
1603 KGSL_CORE_ERR("find_vma(%p) failed\n", hostptr);
1604 return -EINVAL;
1605 }
1606
1607 /* We don't necessarily start at vma->vm_start */
1608 len = vma->vm_end - (unsigned long) hostptr;
1609
1610 if (offset >= len)
1611 return -EINVAL;
1612
1613 if (!KGSL_IS_PAGE_ALIGNED((unsigned long) hostptr) ||
1614 !KGSL_IS_PAGE_ALIGNED(len)) {
1615		KGSL_CORE_ERR("user address len(%u) "
1616			"and start(%p) must be page "
1617			"aligned\n", len, hostptr);
1618 return -EINVAL;
1619 }
1620
1621 if (size == 0)
1622 size = len;
1623
1624 /* Adjust the size of the region to account for the offset */
1625 size += offset & ~PAGE_MASK;
1626
1627 size = ALIGN(size, PAGE_SIZE);
1628
1629 if (_check_region(offset & PAGE_MASK, size, len)) {
1630		KGSL_CORE_ERR("Offset (%ld) + size (%d) is larger "
1631			"than region length %d\n",
1632 offset & PAGE_MASK, size, len);
1633 return -EINVAL;
1634 }
1635
1636 entry->memdesc.pagetable = pagetable;
1637 entry->memdesc.size = size;
1638 entry->memdesc.hostptr = hostptr + (offset & PAGE_MASK);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001639
Jordan Croused17e9aa2011-10-12 16:57:48 -06001640 return memdesc_sg_virt(&entry->memdesc,
1641 hostptr + (offset & PAGE_MASK), size);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001642}
1643
1644#ifdef CONFIG_ASHMEM
1645static int kgsl_setup_ashmem(struct kgsl_mem_entry *entry,
1646 struct kgsl_pagetable *pagetable,
1647 int fd, void *hostptr, size_t size)
1648{
1649 int ret;
1650 struct vm_area_struct *vma;
1651 struct file *filep, *vmfile;
1652 unsigned long len;
Jordan Crouse2c542b62011-07-26 08:30:20 -06001653 unsigned int hostaddr = (unsigned int) hostptr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001654
Jordan Crouse2c542b62011-07-26 08:30:20 -06001655 vma = kgsl_get_vma_from_start_addr(hostaddr);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001656 if (vma == NULL)
1657 return -EINVAL;
1658
Jordan Crouse2c542b62011-07-26 08:30:20 -06001659 if (vma->vm_pgoff || vma->vm_start != hostaddr) {
1660 KGSL_CORE_ERR("Invalid vma region\n");
1661 return -EINVAL;
1662 }
1663
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001664 len = vma->vm_end - vma->vm_start;
1665
1666 if (size == 0)
1667 size = len;
1668
1669 if (size != len) {
1670 KGSL_CORE_ERR("Invalid size %d for vma region %p\n",
1671 size, hostptr);
1672 return -EINVAL;
1673 }
1674
1675 ret = get_ashmem_file(fd, &filep, &vmfile, &len);
1676
1677 if (ret) {
1678 KGSL_CORE_ERR("get_ashmem_file failed\n");
1679 return ret;
1680 }
1681
1682 if (vmfile != vma->vm_file) {
1683 KGSL_CORE_ERR("ashmem shmem file does not match vma\n");
1684 ret = -EINVAL;
1685 goto err;
1686 }
1687
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001688 entry->priv_data = filep;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001689 entry->memdesc.pagetable = pagetable;
1690 entry->memdesc.size = ALIGN(size, PAGE_SIZE);
1691 entry->memdesc.hostptr = hostptr;
Jordan Croused17e9aa2011-10-12 16:57:48 -06001692
1693 ret = memdesc_sg_virt(&entry->memdesc, hostptr, size);
1694 if (ret)
1695 goto err;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001696
1697 return 0;
1698
1699err:
1700 put_ashmem_file(filep);
1701 return ret;
1702}
1703#else
1704static int kgsl_setup_ashmem(struct kgsl_mem_entry *entry,
1705 struct kgsl_pagetable *pagetable,
1706 int fd, void *hostptr, size_t size)
1707{
1708 return -EINVAL;
1709}
1710#endif
1711
Jordan Crouse8eab35a2011-10-12 16:57:48 -06001712static int kgsl_setup_ion(struct kgsl_mem_entry *entry,
1713 struct kgsl_pagetable *pagetable, int fd)
1714{
1715 struct ion_handle *handle;
1716 struct scatterlist *s;
Laura Abbottb14ed962012-01-30 14:18:08 -08001717 struct sg_table *sg_table;
Jordan Crouse8eab35a2011-10-12 16:57:48 -06001718
Harsh Vardhan Dwivedif48af7f2012-04-13 12:50:44 -06001719 if (IS_ERR_OR_NULL(kgsl_ion_client))
1720 return -ENODEV;
Jordan Crouse8eab35a2011-10-12 16:57:48 -06001721
Laura Abbottb14ed962012-01-30 14:18:08 -08001722 handle = ion_import_dma_buf(kgsl_ion_client, fd);
Ranjhith Kalisamy0d2e14f2012-08-14 19:49:39 +05301723 if (IS_ERR(handle))
Jordan Crouse8eab35a2011-10-12 16:57:48 -06001724 return PTR_ERR(handle);
Ranjhith Kalisamy0d2e14f2012-08-14 19:49:39 +05301725 else if (!handle)
1726 return -EINVAL;
Jordan Crouse8eab35a2011-10-12 16:57:48 -06001727
1728 entry->memtype = KGSL_MEM_ENTRY_ION;
1729 entry->priv_data = handle;
1730 entry->memdesc.pagetable = pagetable;
1731 entry->memdesc.size = 0;
1732
Laura Abbottb14ed962012-01-30 14:18:08 -08001733 sg_table = ion_sg_table(kgsl_ion_client, handle);
1734
1735 if (IS_ERR_OR_NULL(sg_table))
Jordan Crouse8eab35a2011-10-12 16:57:48 -06001736 goto err;
1737
Laura Abbottb14ed962012-01-30 14:18:08 -08001738 entry->memdesc.sg = sg_table->sgl;
Jordan Crouse8eab35a2011-10-12 16:57:48 -06001739
1740 /* Calculate the size of the memdesc from the sglist */
1741
1742 entry->memdesc.sglen = 0;
1743
1744 for (s = entry->memdesc.sg; s != NULL; s = sg_next(s)) {
1745 entry->memdesc.size += s->length;
1746 entry->memdesc.sglen++;
1747 }
1748
1749 return 0;
1750err:
1751 ion_free(kgsl_ion_client, handle);
1752 return -ENOMEM;
1753}
1754
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001755static long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
1756 unsigned int cmd, void *data)
1757{
1758 int result = -EINVAL;
1759 struct kgsl_map_user_mem *param = data;
1760 struct kgsl_mem_entry *entry = NULL;
1761 struct kgsl_process_private *private = dev_priv->process_priv;
Jason848741a2011-07-12 10:24:25 -07001762 enum kgsl_user_mem_type memtype;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001763
1764 entry = kgsl_mem_entry_create();
1765
1766 if (entry == NULL)
1767 return -ENOMEM;
1768
Jason848741a2011-07-12 10:24:25 -07001769 if (_IOC_SIZE(cmd) == sizeof(struct kgsl_sharedmem_from_pmem))
1770 memtype = KGSL_USER_MEM_TYPE_PMEM;
1771 else
1772 memtype = param->memtype;
1773
Jordan Crousedc67dfb2012-10-25 09:41:46 -06001774 entry->memdesc.flags = param->flags;
1775
Jason848741a2011-07-12 10:24:25 -07001776 switch (memtype) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001777 case KGSL_USER_MEM_TYPE_PMEM:
1778 if (param->fd == 0 || param->len == 0)
1779 break;
1780
1781 result = kgsl_setup_phys_file(entry, private->pagetable,
1782 param->fd, param->offset,
1783 param->len);
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001784 entry->memtype = KGSL_MEM_ENTRY_PMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001785 break;
1786
1787 case KGSL_USER_MEM_TYPE_ADDR:
Harsh Vardhan Dwivedia9eb7cb2012-03-26 15:21:38 -06001788 KGSL_DEV_ERR_ONCE(dev_priv->device, "User mem type "
1789 "KGSL_USER_MEM_TYPE_ADDR is deprecated\n");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001790 if (!kgsl_mmu_enabled()) {
1791 KGSL_DRV_ERR(dev_priv->device,
1792 "Cannot map paged memory with the "
1793 "MMU disabled\n");
1794 break;
1795 }
1796
1797 if (param->hostptr == 0)
1798 break;
1799
1800 result = kgsl_setup_hostptr(entry, private->pagetable,
1801 (void *) param->hostptr,
1802 param->offset, param->len);
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001803 entry->memtype = KGSL_MEM_ENTRY_USER;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001804 break;
1805
1806 case KGSL_USER_MEM_TYPE_ASHMEM:
1807 if (!kgsl_mmu_enabled()) {
1808 KGSL_DRV_ERR(dev_priv->device,
1809 "Cannot map paged memory with the "
1810 "MMU disabled\n");
1811 break;
1812 }
1813
1814 if (param->hostptr == 0)
1815 break;
1816
1817 result = kgsl_setup_ashmem(entry, private->pagetable,
1818 param->fd, (void *) param->hostptr,
1819 param->len);
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001820
1821 entry->memtype = KGSL_MEM_ENTRY_ASHMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001822 break;
Jordan Crouse8eab35a2011-10-12 16:57:48 -06001823 case KGSL_USER_MEM_TYPE_ION:
1824 result = kgsl_setup_ion(entry, private->pagetable,
1825 param->fd);
1826 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001827 default:
Jason848741a2011-07-12 10:24:25 -07001828 KGSL_CORE_ERR("Invalid memory type: %x\n", memtype);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001829 break;
1830 }
1831
1832 if (result)
1833 goto error;
1834
Rajeev Kulkarni8dfdc3362012-11-22 00:22:32 -08001835 if (entry->memdesc.size >= SZ_1M)
Jordan Crousedc67dfb2012-10-25 09:41:46 -06001836 kgsl_memdesc_set_align(&entry->memdesc, ilog2(SZ_1M));
Rajeev Kulkarni8dfdc3362012-11-22 00:22:32 -08001837 else if (entry->memdesc.size >= SZ_64K)
Jordan Crousedc67dfb2012-10-25 09:41:46 -06001838		kgsl_memdesc_set_align(&entry->memdesc, ilog2(SZ_64K));
Rajeev Kulkarni8dfdc3362012-11-22 00:22:32 -08001839
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001840 result = kgsl_mmu_map(private->pagetable,
1841 &entry->memdesc,
1842 GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
1843
1844 if (result)
1845 goto error_put_file_ptr;
1846
1847 /* Adjust the returned value for a non 4k aligned offset */
1848 param->gpuaddr = entry->memdesc.gpuaddr + (param->offset & ~PAGE_MASK);
1849
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001850 KGSL_STATS_ADD(param->len, kgsl_driver.stats.mapped,
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001851 kgsl_driver.stats.mapped_max);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001852
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001853 kgsl_process_add_stats(private, entry->memtype, param->len);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001854
1855 kgsl_mem_entry_attach_process(entry, private);
Jeremy Gebbena5859272012-03-01 12:46:28 -07001856 trace_kgsl_mem_map(entry, param->fd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001857
1858 kgsl_check_idle(dev_priv->device);
1859 return result;
1860
Jeremy Gebben53d4dd02012-05-07 15:42:00 -06001861error_put_file_ptr:
1862 switch (entry->memtype) {
1863 case KGSL_MEM_ENTRY_PMEM:
1864 case KGSL_MEM_ENTRY_ASHMEM:
1865 if (entry->priv_data)
1866 fput(entry->priv_data);
1867 break;
1868 case KGSL_MEM_ENTRY_ION:
Jeremy Gebben53d4dd02012-05-07 15:42:00 -06001869 ion_free(kgsl_ion_client, entry->priv_data);
1870 break;
1871 default:
1872 break;
1873 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001874error:
1875 kfree(entry);
1876 kgsl_check_idle(dev_priv->device);
1877 return result;
1878}
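
/*
 * Illustrative userspace sketch (not part of this driver): importing an ION
 * buffer into the GPU address space with IOCTL_KGSL_MAP_USER_MEM.  Struct and
 * constant names follow the msm_kgsl.h UAPI of this vintage; treat the exact
 * fields as assumptions rather than a reference.
 *
 *	struct kgsl_map_user_mem req = {
 *		.fd = ion_buffer_fd,
 *		.memtype = KGSL_USER_MEM_TYPE_ION,
 *	};
 *
 *	ret = ioctl(kgsl_fd, IOCTL_KGSL_MAP_USER_MEM, &req);
 *
 * On success req.gpuaddr holds the GPU virtual address of the imported
 * buffer, which is the handle kgsl_ioctl_sharedmem_free() above expects back.
 */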
1879
1880/* This function flushes a graphics memory allocation from the CPU cache
1881 * when caching is enabled with the MMU */
1882static long
1883kgsl_ioctl_sharedmem_flush_cache(struct kgsl_device_private *dev_priv,
1884 unsigned int cmd, void *data)
1885{
1886 int result = 0;
1887 struct kgsl_mem_entry *entry;
1888 struct kgsl_sharedmem_free *param = data;
1889 struct kgsl_process_private *private = dev_priv->process_priv;
1890
1891 spin_lock(&private->mem_lock);
1892 entry = kgsl_sharedmem_find(private, param->gpuaddr);
1893 if (!entry) {
1894 KGSL_CORE_ERR("invalid gpuaddr %08x\n", param->gpuaddr);
1895 result = -EINVAL;
Jeremy Gebben690f9d12011-08-08 16:33:49 -06001896 goto done;
1897 }
Jeremy Gebben690f9d12011-08-08 16:33:49 -06001898 if (!entry->memdesc.hostptr) {
1899 KGSL_CORE_ERR("invalid hostptr with gpuaddr %08x\n",
1900 param->gpuaddr);
		result = -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001901		goto done;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001902 }
Jeremy Gebben690f9d12011-08-08 16:33:49 -06001903
1904 kgsl_cache_range_op(&entry->memdesc, KGSL_CACHE_OP_CLEAN);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001905done:
Jeremy Gebben690f9d12011-08-08 16:33:49 -06001906 spin_unlock(&private->mem_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001907 return result;
1908}
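
/*
 * Illustrative userspace sketch (not part of this driver): flushing a cached
 * allocation before the GPU reads it, using the ioctl handled above.  Note
 * that the ioctl reuses struct kgsl_sharedmem_free to carry the gpuaddr;
 * names are from the msm_kgsl.h UAPI of this vintage and are assumptions.
 *
 *	struct kgsl_sharedmem_free flush = { .gpuaddr = gpuaddr };
 *
 *	ioctl(kgsl_fd, IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE, &flush);
 */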
1909
1910static long
1911kgsl_ioctl_gpumem_alloc(struct kgsl_device_private *dev_priv,
1912 unsigned int cmd, void *data)
1913{
1914 struct kgsl_process_private *private = dev_priv->process_priv;
1915 struct kgsl_gpumem_alloc *param = data;
1916 struct kgsl_mem_entry *entry;
1917 int result;
1918
1919 entry = kgsl_mem_entry_create();
1920 if (entry == NULL)
1921 return -ENOMEM;
1922
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001923 result = kgsl_allocate_user(&entry->memdesc, private->pagetable,
1924 param->size, param->flags);
1925
1926 if (result == 0) {
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001927 entry->memtype = KGSL_MEM_ENTRY_KERNEL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001928 kgsl_mem_entry_attach_process(entry, private);
1929 param->gpuaddr = entry->memdesc.gpuaddr;
1930
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001931 kgsl_process_add_stats(private, entry->memtype, param->size);
Jeremy Gebbena5859272012-03-01 12:46:28 -07001932 trace_kgsl_mem_alloc(entry);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001933 } else
1934 kfree(entry);
1935
1936 kgsl_check_idle(dev_priv->device);
1937 return result;
1938}
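
/*
 * Illustrative userspace sketch (not part of this driver): allocating GPU
 * memory and mapping it into the process.  The gpuaddr returned by the ioctl
 * doubles as the mmap() offset that kgsl_mmap() below looks up.  Names follow
 * the msm_kgsl.h UAPI of this vintage; treat the details as assumptions.
 *
 *	struct kgsl_gpumem_alloc alloc = { .size = 4096, .flags = 0 };
 *	struct kgsl_sharedmem_free free_req;
 *	void *cpu_ptr;
 *
 *	ioctl(kgsl_fd, IOCTL_KGSL_GPUMEM_ALLOC, &alloc);
 *	cpu_ptr = mmap(NULL, alloc.size, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, kgsl_fd, alloc.gpuaddr);
 *	...
 *	munmap(cpu_ptr, alloc.size);
 *	free_req.gpuaddr = alloc.gpuaddr;
 *	ioctl(kgsl_fd, IOCTL_KGSL_SHAREDMEM_FREE, &free_req);
 */
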
Jeremy Gebbena7423e42011-04-18 15:11:21 -06001939static long kgsl_ioctl_cff_syncmem(struct kgsl_device_private *dev_priv,
1940 unsigned int cmd, void *data)
1941{
1942 int result = 0;
1943 struct kgsl_cff_syncmem *param = data;
1944 struct kgsl_process_private *private = dev_priv->process_priv;
1945 struct kgsl_mem_entry *entry = NULL;
1946
1947 spin_lock(&private->mem_lock);
1948 entry = kgsl_sharedmem_find_region(private, param->gpuaddr, param->len);
1949 if (entry)
1950 kgsl_cffdump_syncmem(dev_priv, &entry->memdesc, param->gpuaddr,
1951 param->len, true);
1952 else
1953 result = -EINVAL;
1954 spin_unlock(&private->mem_lock);
1955 return result;
1956}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001957
Sushmita Susheelendra41f8fa32011-05-11 17:15:58 -06001958static long kgsl_ioctl_cff_user_event(struct kgsl_device_private *dev_priv,
1959 unsigned int cmd, void *data)
1960{
1961 int result = 0;
1962 struct kgsl_cff_user_event *param = data;
1963
1964 kgsl_cffdump_user_event(param->cff_opcode, param->op1, param->op2,
1965 param->op3, param->op4, param->op5);
1966
1967 return result;
1968}
1969
Jordan Croused4bc9d22011-11-17 13:39:21 -07001970#ifdef CONFIG_GENLOCK
1971struct kgsl_genlock_event_priv {
1972 struct genlock_handle *handle;
1973 struct genlock *lock;
1974};
1975
1976/**
1977 * kgsl_genlock_event_cb - Event callback for a genlock timestamp event
1978 * @device - The KGSL device that expired the timestamp
1979 * @priv - private data for the event
Carter Cooper7e7f02e2012-02-15 09:36:31 -07001980 * @context_id - the context id that goes with the timestamp
Jordan Croused4bc9d22011-11-17 13:39:21 -07001981 * @timestamp - the timestamp that triggered the event
1982 *
1983 * Release a genlock lock following the expiration of a timestamp
1984 */
1985
1986static void kgsl_genlock_event_cb(struct kgsl_device *device,
Carter Cooper7e7f02e2012-02-15 09:36:31 -07001987 void *priv, u32 context_id, u32 timestamp)
Jordan Croused4bc9d22011-11-17 13:39:21 -07001988{
1989 struct kgsl_genlock_event_priv *ev = priv;
1990 int ret;
1991
1992 ret = genlock_lock(ev->handle, GENLOCK_UNLOCK, 0, 0);
1993 if (ret)
1994 KGSL_CORE_ERR("Error while unlocking genlock: %d\n", ret);
1995
1996 genlock_put_handle(ev->handle);
1997
1998 kfree(ev);
1999}
2000
2001/**
2002 * kgsl_add_genlock_event - Create a new genlock event
2003 * @device - KGSL device to create the event on
2004 * @timestamp - Timestamp to trigger the event
2005 * @data - User space buffer containing struct kgsl_timestamp_event_genlock
2006 * @len - length of the userspace buffer
Jeremy Gebbenfd87f9a2012-02-10 07:06:09 -07002007 * @owner - driver instance that owns this event
Jordan Croused4bc9d22011-11-17 13:39:21 -07002008 * @returns 0 on success or error code on error
2009 *
2010 * Attach to a genlock handle and register an event to release the
2011 * genlock lock when the timestamp expires
2012 */
2013
2014static int kgsl_add_genlock_event(struct kgsl_device *device,
Carter Cooper7e7f02e2012-02-15 09:36:31 -07002015 u32 context_id, u32 timestamp, void __user *data, int len,
Jeremy Gebbenfd87f9a2012-02-10 07:06:09 -07002016 struct kgsl_device_private *owner)
Jordan Croused4bc9d22011-11-17 13:39:21 -07002017{
2018 struct kgsl_genlock_event_priv *event;
2019 struct kgsl_timestamp_event_genlock priv;
2020 int ret;
2021
2022 if (len != sizeof(priv))
2023 return -EINVAL;
2024
2025 if (copy_from_user(&priv, data, sizeof(priv)))
2026 return -EFAULT;
2027
2028 event = kzalloc(sizeof(*event), GFP_KERNEL);
2029
2030 if (event == NULL)
2031 return -ENOMEM;
2032
2033 event->handle = genlock_get_handle_fd(priv.handle);
2034
2035 if (IS_ERR(event->handle)) {
2036 int ret = PTR_ERR(event->handle);
2037 kfree(event);
2038 return ret;
2039 }
2040
Carter Cooper7e7f02e2012-02-15 09:36:31 -07002041 ret = kgsl_add_event(device, context_id, timestamp,
2042 kgsl_genlock_event_cb, event, owner);
Jordan Croused4bc9d22011-11-17 13:39:21 -07002043 if (ret)
2044 kfree(event);
2045
2046 return ret;
2047}
2048#else
2049static long kgsl_add_genlock_event(struct kgsl_device *device,
Carter Cooper7e7f02e2012-02-15 09:36:31 -07002050 u32 context_id, u32 timestamp, void __user *data, int len,
Jeremy Gebbenfd87f9a2012-02-10 07:06:09 -07002051 struct kgsl_device_private *owner)
Jordan Croused4bc9d22011-11-17 13:39:21 -07002052{
2053 return -EINVAL;
2054}
2055#endif
2056
2057/**
2058 * kgsl_ioctl_timestamp_event - Register a new timestamp event from userspace
2059 * @dev_priv - pointer to the private device structure
2060 * @cmd - the ioctl cmd passed from kgsl_ioctl
2061 * @data - the user data buffer from kgsl_ioctl
2062 * @returns 0 on success or error code on failure
2063 */
2064
2065static long kgsl_ioctl_timestamp_event(struct kgsl_device_private *dev_priv,
2066 unsigned int cmd, void *data)
2067{
2068 struct kgsl_timestamp_event *param = data;
2069 int ret;
2070
2071 switch (param->type) {
2072 case KGSL_TIMESTAMP_EVENT_GENLOCK:
2073 ret = kgsl_add_genlock_event(dev_priv->device,
Carter Cooper7e7f02e2012-02-15 09:36:31 -07002074 param->context_id, param->timestamp, param->priv,
2075 param->len, dev_priv);
Jordan Croused4bc9d22011-11-17 13:39:21 -07002076 break;
Jeff Boodyfe6c39c2012-08-09 13:54:50 -06002077 case KGSL_TIMESTAMP_EVENT_FENCE:
2078 ret = kgsl_add_fence_event(dev_priv->device,
2079 param->context_id, param->timestamp, param->priv,
2080 param->len, dev_priv);
2081 break;
Jordan Croused4bc9d22011-11-17 13:39:21 -07002082 default:
2083 ret = -EINVAL;
2084 }
2085
2086 return ret;
2087}
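
/*
 * Illustrative userspace sketch (not part of this driver): asking the driver
 * to release a genlock handle once a context timestamp retires.  Struct and
 * constant names follow the msm_kgsl.h UAPI of this vintage; treat them as
 * assumptions.
 *
 *	struct kgsl_timestamp_event_genlock glock = { .handle = genlock_fd };
 *	struct kgsl_timestamp_event event = {
 *		.type = KGSL_TIMESTAMP_EVENT_GENLOCK,
 *		.timestamp = ts,
 *		.context_id = ctxt_id,
 *		.priv = &glock,
 *		.len = sizeof(glock),
 *	};
 *
 *	ioctl(kgsl_fd, IOCTL_KGSL_TIMESTAMP_EVENT, &event);
 */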
2088
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002089typedef long (*kgsl_ioctl_func_t)(struct kgsl_device_private *,
2090 unsigned int, void *);
2091
Vladimir Razgulin38345302013-01-22 18:41:59 -07002092#define KGSL_IOCTL_FUNC(_cmd, _func, _flags) \
2093 [_IOC_NR((_cmd))] = \
2094 { .cmd = (_cmd), .func = (_func), .flags = (_flags) }
2095
2096#define KGSL_IOCTL_LOCK BIT(0)
2097#define KGSL_IOCTL_WAKE BIT(1)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002098
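/*
 * Each entry lands at the index equal to _IOC_NR() of its ioctl code, so
 * kgsl_ioctl() below can dispatch with a direct array lookup.  As a sketch
 * (the actual index depends on how msm_kgsl.h defines the ioctl code):
 *
 *	KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC, kgsl_ioctl_gpumem_alloc, 0)
 *
 * expands to
 *
 *	[_IOC_NR(IOCTL_KGSL_GPUMEM_ALLOC)] = {
 *		.cmd = IOCTL_KGSL_GPUMEM_ALLOC,
 *		.func = kgsl_ioctl_gpumem_alloc,
 *		.flags = 0,
 *	}
 */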
2099static const struct {
2100 unsigned int cmd;
2101 kgsl_ioctl_func_t func;
Vladimir Razgulin38345302013-01-22 18:41:59 -07002102 int flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002103} kgsl_ioctl_funcs[] = {
2104 KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_GETPROPERTY,
Vladimir Razgulin38345302013-01-22 18:41:59 -07002105 kgsl_ioctl_device_getproperty,
2106 KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002107 KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP,
Vladimir Razgulin38345302013-01-22 18:41:59 -07002108 kgsl_ioctl_device_waittimestamp,
2109 KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
Carter Cooper7e7f02e2012-02-15 09:36:31 -07002110 KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID,
Vladimir Razgulin38345302013-01-22 18:41:59 -07002111 kgsl_ioctl_device_waittimestamp_ctxtid,
2112 KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002113 KGSL_IOCTL_FUNC(IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS,
Vladimir Razgulin38345302013-01-22 18:41:59 -07002114 kgsl_ioctl_rb_issueibcmds,
2115 KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002116 KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP,
Vladimir Razgulin38345302013-01-22 18:41:59 -07002117 kgsl_ioctl_cmdstream_readtimestamp,
2118 KGSL_IOCTL_LOCK),
Carter Cooper7e7f02e2012-02-15 09:36:31 -07002119 KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID,
Vladimir Razgulin38345302013-01-22 18:41:59 -07002120 kgsl_ioctl_cmdstream_readtimestamp_ctxtid,
2121 KGSL_IOCTL_LOCK),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002122 KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP,
Vladimir Razgulin38345302013-01-22 18:41:59 -07002123 kgsl_ioctl_cmdstream_freememontimestamp,
2124 KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
Carter Cooper7e7f02e2012-02-15 09:36:31 -07002125 KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID,
Vladimir Razgulin38345302013-01-22 18:41:59 -07002126 kgsl_ioctl_cmdstream_freememontimestamp_ctxtid,
2127 KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002128 KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_CREATE,
Vladimir Razgulin38345302013-01-22 18:41:59 -07002129 kgsl_ioctl_drawctxt_create,
2130 KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002131 KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_DESTROY,
Vladimir Razgulin38345302013-01-22 18:41:59 -07002132 kgsl_ioctl_drawctxt_destroy,
2133 KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002134 KGSL_IOCTL_FUNC(IOCTL_KGSL_MAP_USER_MEM,
2135 kgsl_ioctl_map_user_mem, 0),
2136 KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FROM_PMEM,
2137 kgsl_ioctl_map_user_mem, 0),
2138 KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FREE,
2139 kgsl_ioctl_sharedmem_free, 0),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002140 KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE,
2141 kgsl_ioctl_sharedmem_flush_cache, 0),
2142 KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC,
2143 kgsl_ioctl_gpumem_alloc, 0),
Jeremy Gebbena7423e42011-04-18 15:11:21 -06002144 KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_SYNCMEM,
2145 kgsl_ioctl_cff_syncmem, 0),
Sushmita Susheelendra41f8fa32011-05-11 17:15:58 -06002146 KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_USER_EVENT,
2147 kgsl_ioctl_cff_user_event, 0),
Jordan Croused4bc9d22011-11-17 13:39:21 -07002148 KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMESTAMP_EVENT,
Vladimir Razgulin38345302013-01-22 18:41:59 -07002149 kgsl_ioctl_timestamp_event,
2150 KGSL_IOCTL_LOCK),
Jordan Crouseed7dd7f2012-03-29 13:16:02 -06002151 KGSL_IOCTL_FUNC(IOCTL_KGSL_SETPROPERTY,
Vladimir Razgulin38345302013-01-22 18:41:59 -07002152 kgsl_ioctl_device_setproperty,
2153 KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002154};
2155
2156static long kgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
2157{
2158 struct kgsl_device_private *dev_priv = filep->private_data;
Jordan Crouse1e76f612012-08-08 13:24:21 -06002159 unsigned int nr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002160 kgsl_ioctl_func_t func;
Vladimir Razgulin38345302013-01-22 18:41:59 -07002161 int lock, ret, use_hw;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002162 char ustack[64];
2163 void *uptr = NULL;
2164
2165 BUG_ON(dev_priv == NULL);
2166
2167	/* Workaround for a previously incorrectly defined ioctl code.
2168	   This helps ensure binary compatibility */
2169
2170 if (cmd == IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD)
2171 cmd = IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP;
Jason Varbedian80ba33d2011-07-11 17:29:05 -07002172 else if (cmd == IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD)
2173 cmd = IOCTL_KGSL_CMDSTREAM_READTIMESTAMP;
Jeff Boodyfe6c39c2012-08-09 13:54:50 -06002174 else if (cmd == IOCTL_KGSL_TIMESTAMP_EVENT_OLD)
2175 cmd = IOCTL_KGSL_TIMESTAMP_EVENT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002176
Jordan Crouse1e76f612012-08-08 13:24:21 -06002177 nr = _IOC_NR(cmd);
2178
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002179 if (cmd & (IOC_IN | IOC_OUT)) {
2180 if (_IOC_SIZE(cmd) < sizeof(ustack))
2181 uptr = ustack;
2182 else {
2183 uptr = kzalloc(_IOC_SIZE(cmd), GFP_KERNEL);
2184 if (uptr == NULL) {
2185 KGSL_MEM_ERR(dev_priv->device,
2186 "kzalloc(%d) failed\n", _IOC_SIZE(cmd));
2187 ret = -ENOMEM;
2188 goto done;
2189 }
2190 }
2191
2192 if (cmd & IOC_IN) {
2193 if (copy_from_user(uptr, (void __user *) arg,
2194 _IOC_SIZE(cmd))) {
2195 ret = -EFAULT;
2196 goto done;
2197 }
2198 } else
2199 memset(uptr, 0, _IOC_SIZE(cmd));
2200 }
2201
2202 if (nr < ARRAY_SIZE(kgsl_ioctl_funcs) &&
Jordan Crouse1e76f612012-08-08 13:24:21 -06002203 kgsl_ioctl_funcs[nr].func != NULL) {
2204
2205 /*
2206 * Make sure that nobody tried to send us a malformed ioctl code
2207 * with a valid NR but bogus flags
2208 */
2209
2210 if (kgsl_ioctl_funcs[nr].cmd != cmd) {
2211 KGSL_DRV_ERR(dev_priv->device,
2212 "Malformed ioctl code %08x\n", cmd);
2213 ret = -ENOIOCTLCMD;
2214 goto done;
2215 }
2216
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002217 func = kgsl_ioctl_funcs[nr].func;
Vladimir Razgulin38345302013-01-22 18:41:59 -07002218 lock = kgsl_ioctl_funcs[nr].flags & KGSL_IOCTL_LOCK;
2219 use_hw = kgsl_ioctl_funcs[nr].flags & KGSL_IOCTL_WAKE;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002220 } else {
2221 func = dev_priv->device->ftbl->ioctl;
2222 if (!func) {
2223 KGSL_DRV_INFO(dev_priv->device,
2224 "invalid ioctl code %08x\n", cmd);
Jeremy Gebbenc15b4612012-01-09 09:44:11 -07002225 ret = -ENOIOCTLCMD;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002226 goto done;
2227 }
2228 lock = 1;
Vladimir Razgulin38345302013-01-22 18:41:59 -07002229 use_hw = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002230 }
2231
2232 if (lock) {
2233 mutex_lock(&dev_priv->device->mutex);
Vladimir Razgulin38345302013-01-22 18:41:59 -07002234 if (use_hw)
2235 kgsl_check_suspended(dev_priv->device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002236 }
2237
2238 ret = func(dev_priv, cmd, uptr);
2239
2240 if (lock) {
2241 kgsl_check_idle_locked(dev_priv->device);
2242 mutex_unlock(&dev_priv->device->mutex);
2243 }
2244
2245 if (ret == 0 && (cmd & IOC_OUT)) {
2246 if (copy_to_user((void __user *) arg, uptr, _IOC_SIZE(cmd)))
2247 ret = -EFAULT;
2248 }
2249
2250done:
2251 if (_IOC_SIZE(cmd) >= sizeof(ustack))
2252 kfree(uptr);
2253
2254 return ret;
2255}
2256
2257static int
2258kgsl_mmap_memstore(struct kgsl_device *device, struct vm_area_struct *vma)
2259{
2260 struct kgsl_memdesc *memdesc = &device->memstore;
2261 int result;
2262 unsigned int vma_size = vma->vm_end - vma->vm_start;
2263
2264 /* The memstore can only be mapped as read only */
2265
2266 if (vma->vm_flags & VM_WRITE)
2267 return -EPERM;
2268
2269 if (memdesc->size != vma_size) {
2270 KGSL_MEM_ERR(device, "memstore bad size: %d should be %d\n",
2271 vma_size, memdesc->size);
2272 return -EINVAL;
2273 }
2274
2275 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2276
Shubhraprakash Das87f68132012-07-30 23:25:13 -07002277 result = remap_pfn_range(vma, vma->vm_start,
2278 device->memstore.physaddr >> PAGE_SHIFT,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002279 vma_size, vma->vm_page_prot);
2280 if (result != 0)
2281 KGSL_MEM_ERR(device, "remap_pfn_range failed: %d\n",
2282 result);
2283
2284 return result;
2285}
2286
Jordan Crouse4283e172011-09-26 14:45:47 -06002287/*
2288 * kgsl_gpumem_vm_open is called whenever a vma region is copied or split.
2289 * Increase the refcount to make sure that the accounting stays correct
2290 */
2291
2292static void kgsl_gpumem_vm_open(struct vm_area_struct *vma)
2293{
2294 struct kgsl_mem_entry *entry = vma->vm_private_data;
2295 kgsl_mem_entry_get(entry);
2296}
2297
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002298static int
2299kgsl_gpumem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2300{
2301 struct kgsl_mem_entry *entry = vma->vm_private_data;
2302
Jordan Croused17e9aa2011-10-12 16:57:48 -06002303 if (!entry->memdesc.ops || !entry->memdesc.ops->vmfault)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002304 return VM_FAULT_SIGBUS;
2305
2306 return entry->memdesc.ops->vmfault(&entry->memdesc, vma, vmf);
2307}
2308
2309static void
2310kgsl_gpumem_vm_close(struct vm_area_struct *vma)
2311{
2312 struct kgsl_mem_entry *entry = vma->vm_private_data;
2313 kgsl_mem_entry_put(entry);
2314}
2315
2316static struct vm_operations_struct kgsl_gpumem_vm_ops = {
Jordan Crouse4283e172011-09-26 14:45:47 -06002317 .open = kgsl_gpumem_vm_open,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002318 .fault = kgsl_gpumem_vm_fault,
2319 .close = kgsl_gpumem_vm_close,
2320};
2321
2322static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
2323{
2324 unsigned long vma_offset = vma->vm_pgoff << PAGE_SHIFT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002325 struct kgsl_device_private *dev_priv = file->private_data;
2326 struct kgsl_process_private *private = dev_priv->process_priv;
Jordan Crousec9559e42012-04-05 16:55:56 -06002327 struct kgsl_mem_entry *entry = NULL;
Jordan Crouse2db0af92011-08-08 16:05:09 -06002328 struct kgsl_device *device = dev_priv->device;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002329
2330	/* Handle legacy behavior for memstore */
2331
Shubhraprakash Das87f68132012-07-30 23:25:13 -07002332 if (vma_offset == device->memstore.gpuaddr)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002333 return kgsl_mmap_memstore(device, vma);
2334
2335 /* Find a chunk of GPU memory */
2336
2337 spin_lock(&private->mem_lock);
Jordan Crousec9559e42012-04-05 16:55:56 -06002338 entry = kgsl_sharedmem_find(private, vma_offset);
2339
2340 if (entry)
2341 kgsl_mem_entry_get(entry);
2342
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002343 spin_unlock(&private->mem_lock);
2344
2345 if (entry == NULL)
2346 return -EINVAL;
2347
Jordan Croused17e9aa2011-10-12 16:57:48 -06002348	if (!entry->memdesc.ops ||
2349	    !entry->memdesc.ops->vmflags ||
2350	    !entry->memdesc.ops->vmfault) {
		/* Drop the reference taken above before returning */
		kgsl_mem_entry_put(entry);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002351		return -EINVAL;
	}
2352
2353 vma->vm_flags |= entry->memdesc.ops->vmflags(&entry->memdesc);
2354
2355 vma->vm_private_data = entry;
2356 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
2357 vma->vm_ops = &kgsl_gpumem_vm_ops;
2358 vma->vm_file = file;
2359
2360 return 0;
2361}
2362
Jordan Crouseb368e9b2012-04-27 14:01:59 -06002363static irqreturn_t kgsl_irq_handler(int irq, void *data)
2364{
2365 struct kgsl_device *device = data;
2366
2367 return device->ftbl->irq_handler(device);
2368
2369}
2370
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002371static const struct file_operations kgsl_fops = {
2372 .owner = THIS_MODULE,
2373 .release = kgsl_release,
2374 .open = kgsl_open,
2375 .mmap = kgsl_mmap,
2376 .unlocked_ioctl = kgsl_ioctl,
2377};
2378
2379struct kgsl_driver kgsl_driver = {
2380 .process_mutex = __MUTEX_INITIALIZER(kgsl_driver.process_mutex),
2381 .ptlock = __SPIN_LOCK_UNLOCKED(kgsl_driver.ptlock),
2382 .devlock = __MUTEX_INITIALIZER(kgsl_driver.devlock),
2383};
2384EXPORT_SYMBOL(kgsl_driver);
2385
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002386static void _unregister_device(struct kgsl_device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002387{
2388 int minor;
2389
2390 mutex_lock(&kgsl_driver.devlock);
2391 for (minor = 0; minor < KGSL_DEVICE_MAX; minor++) {
2392 if (device == kgsl_driver.devp[minor])
2393 break;
2394 }
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002395 if (minor != KGSL_DEVICE_MAX) {
2396 device_destroy(kgsl_driver.class,
2397 MKDEV(MAJOR(kgsl_driver.major), minor));
2398 kgsl_driver.devp[minor] = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002399 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002400 mutex_unlock(&kgsl_driver.devlock);
2401}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002402
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002403static int _register_device(struct kgsl_device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002404{
2405 int minor, ret;
2406 dev_t dev;
2407
2408 /* Find a minor for the device */
2409
2410 mutex_lock(&kgsl_driver.devlock);
2411 for (minor = 0; minor < KGSL_DEVICE_MAX; minor++) {
2412 if (kgsl_driver.devp[minor] == NULL) {
2413 kgsl_driver.devp[minor] = device;
2414 break;
2415 }
2416 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002417 mutex_unlock(&kgsl_driver.devlock);
2418
2419 if (minor == KGSL_DEVICE_MAX) {
2420 KGSL_CORE_ERR("minor devices exhausted\n");
2421 return -ENODEV;
2422 }
2423
2424 /* Create the device */
2425 dev = MKDEV(MAJOR(kgsl_driver.major), minor);
2426 device->dev = device_create(kgsl_driver.class,
2427 device->parentdev,
2428 dev, device,
2429 device->name);
2430
2431 if (IS_ERR(device->dev)) {
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002432 mutex_lock(&kgsl_driver.devlock);
2433 kgsl_driver.devp[minor] = NULL;
2434 mutex_unlock(&kgsl_driver.devlock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002435 ret = PTR_ERR(device->dev);
2436 KGSL_CORE_ERR("device_create(%s): %d\n", device->name, ret);
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002437 return ret;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002438 }
2439
2440 dev_set_drvdata(device->parentdev, device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002441 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002442}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002443
Jordan Crouseb368e9b2012-04-27 14:01:59 -06002444int kgsl_device_platform_probe(struct kgsl_device *device)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002445{
Michael Street8bacdd02012-01-05 14:55:01 -08002446 int result;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002447 int status = -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002448 struct resource *res;
2449 struct platform_device *pdev =
2450 container_of(device->parentdev, struct platform_device, dev);
2451
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002452 status = _register_device(device);
2453 if (status)
2454 return status;
2455
2456 /* Initialize logging first, so that failures below actually print. */
2457 kgsl_device_debugfs_init(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002458
2459 status = kgsl_pwrctrl_init(device);
2460 if (status)
2461 goto error;
2462
Harsh Vardhan Dwivedif48af7f2012-04-13 12:50:44 -06002463 kgsl_ion_client = msm_ion_client_create(UINT_MAX, KGSL_NAME);
2464
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002465 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2466 device->iomemname);
2467 if (res == NULL) {
2468 KGSL_DRV_ERR(device, "platform_get_resource_byname failed\n");
2469 status = -EINVAL;
2470 goto error_pwrctrl_close;
2471 }
2472 if (res->start == 0 || resource_size(res) == 0) {
Jordan Crouse7501d452012-04-19 08:58:44 -06002473 KGSL_DRV_ERR(device, "dev %d invalid register region\n",
2474 device->id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002475 status = -EINVAL;
2476 goto error_pwrctrl_close;
2477 }
2478
Jordan Crouse7501d452012-04-19 08:58:44 -06002479 device->reg_phys = res->start;
2480 device->reg_len = resource_size(res);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002481
Jeremy Gebben4204d0f2012-03-01 16:06:21 -07002482 if (!devm_request_mem_region(device->dev, device->reg_phys,
2483 device->reg_len, device->name)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002484 KGSL_DRV_ERR(device, "request_mem_region failed\n");
2485 status = -ENODEV;
2486 goto error_pwrctrl_close;
2487 }
2488
Jeremy Gebben4204d0f2012-03-01 16:06:21 -07002489 device->reg_virt = devm_ioremap(device->dev, device->reg_phys,
2490 device->reg_len);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002491
Jordan Crouse7501d452012-04-19 08:58:44 -06002492 if (device->reg_virt == NULL) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002493 KGSL_DRV_ERR(device, "ioremap failed\n");
2494 status = -ENODEV;
Jeremy Gebben4204d0f2012-03-01 16:06:21 -07002495 goto error_pwrctrl_close;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002496 }
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002497	/* Acquire the interrupt */
2498 device->pwrctrl.interrupt_num =
2499 platform_get_irq_byname(pdev, device->pwrctrl.irq_name);
2500
2501 if (device->pwrctrl.interrupt_num <= 0) {
2502 KGSL_DRV_ERR(device, "platform_get_irq_byname failed: %d\n",
2503 device->pwrctrl.interrupt_num);
2504 status = -EINVAL;
Jeremy Gebben4204d0f2012-03-01 16:06:21 -07002505 goto error_pwrctrl_close;
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002506 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002507
Jeremy Gebben4204d0f2012-03-01 16:06:21 -07002508 status = devm_request_irq(device->dev, device->pwrctrl.interrupt_num,
2509 kgsl_irq_handler, IRQF_TRIGGER_HIGH,
2510 device->name, device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002511 if (status) {
2512 KGSL_DRV_ERR(device, "request_irq(%d) failed: %d\n",
2513 device->pwrctrl.interrupt_num, status);
Jeremy Gebben4204d0f2012-03-01 16:06:21 -07002514 goto error_pwrctrl_close;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002515 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002516 disable_irq(device->pwrctrl.interrupt_num);
2517
2518 KGSL_DRV_INFO(device,
Jordan Crouse7501d452012-04-19 08:58:44 -06002519 "dev_id %d regs phys 0x%08lx size 0x%08x virt %p\n",
2520 device->id, device->reg_phys, device->reg_len,
2521 device->reg_virt);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002522
Michael Street8bacdd02012-01-05 14:55:01 -08002523 result = kgsl_drm_init(pdev);
2524 if (result)
Jeremy Gebben4204d0f2012-03-01 16:06:21 -07002525 goto error_pwrctrl_close;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002526
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002527 kgsl_cffdump_open(device->id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002528
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002529 setup_timer(&device->idle_timer, kgsl_timer, (unsigned long) device);
2530 status = kgsl_create_device_workqueue(device);
2531 if (status)
Jeremy Gebben4204d0f2012-03-01 16:06:21 -07002532 goto error_pwrctrl_close;
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002533
2534 status = kgsl_mmu_init(device);
2535 if (status != 0) {
2536 KGSL_DRV_ERR(device, "kgsl_mmu_init failed %d\n", status);
2537 goto error_dest_work_q;
2538 }
2539
2540 status = kgsl_allocate_contiguous(&device->memstore,
Richard Ruigrok2ad5e9d2012-06-14 14:22:05 -07002541 KGSL_MEMSTORE_SIZE);
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002542
2543 if (status != 0) {
2544 KGSL_DRV_ERR(device, "kgsl_allocate_contiguous failed %d\n",
2545 status);
2546 goto error_close_mmu;
2547 }
2548
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002549 pm_qos_add_request(&device->pm_qos_req_dma, PM_QOS_CPU_DMA_LATENCY,
2550 PM_QOS_DEFAULT_VALUE);
2551
2552	/* Initialize the snapshot engine */
2553 kgsl_device_snapshot_init(device);
2554
2555 /* Initialize common sysfs entries */
2556 kgsl_pwrctrl_init_sysfs(device);
2557
2558 return 0;
2559
2560error_close_mmu:
2561 kgsl_mmu_close(device);
2562error_dest_work_q:
2563 destroy_workqueue(device->work_queue);
2564 device->work_queue = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002565error_pwrctrl_close:
2566 kgsl_pwrctrl_close(device);
2567error:
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002568 _unregister_device(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002569 return status;
2570}
2571EXPORT_SYMBOL(kgsl_device_platform_probe);
2572
Harsh Vardhan Dwivedi715fb832012-05-18 00:24:18 -06002573int kgsl_postmortem_dump(struct kgsl_device *device, int manual)
2574{
2575 bool saved_nap;
2576 struct kgsl_pwrctrl *pwr = &device->pwrctrl;
2577
2578 BUG_ON(device == NULL);
2579
2580 kgsl_cffdump_hang(device->id);
2581
2582 /* For a manual dump, make sure that the system is idle */
2583
2584 if (manual) {
2585 if (device->active_cnt != 0) {
2586 mutex_unlock(&device->mutex);
2587 wait_for_completion(&device->suspend_gate);
2588 mutex_lock(&device->mutex);
2589 }
2590
2591 if (device->state == KGSL_STATE_ACTIVE)
Jordan Crousea29a2e02012-08-14 09:09:23 -06002592 kgsl_idle(device);
Harsh Vardhan Dwivedi715fb832012-05-18 00:24:18 -06002593
2594 }
2595 KGSL_LOG_DUMP(device, "|%s| Dump Started\n", device->name);
2596 KGSL_LOG_DUMP(device, "POWER: FLAGS = %08lX | ACTIVE POWERLEVEL = %08X",
2597 pwr->power_flags, pwr->active_pwrlevel);
2598
2599 KGSL_LOG_DUMP(device, "POWER: INTERVAL TIMEOUT = %08X ",
2600 pwr->interval_timeout);
2601
2602 KGSL_LOG_DUMP(device, "GRP_CLK = %lu ",
2603 kgsl_get_clkrate(pwr->grp_clks[0]));
2604
2605 KGSL_LOG_DUMP(device, "BUS CLK = %lu ",
2606 kgsl_get_clkrate(pwr->ebi1_clk));
2607
2608 /* Disable the idle timer so we don't get interrupted */
2609 del_timer_sync(&device->idle_timer);
2610 mutex_unlock(&device->mutex);
2611 flush_workqueue(device->work_queue);
2612 mutex_lock(&device->mutex);
2613
2614	/* Turn off napping to make sure we have the clocks' full
2615	   attention through the following process */
2616 saved_nap = device->pwrctrl.nap_allowed;
2617 device->pwrctrl.nap_allowed = false;
2618
2619 /* Force on the clocks */
2620 kgsl_pwrctrl_wake(device);
2621
2622 /* Disable the irq */
2623 kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
2624
2625	/* Call the device-specific postmortem dump function */
2626 device->ftbl->postmortem_dump(device, manual);
2627
2628 /* Restore nap mode */
2629 device->pwrctrl.nap_allowed = saved_nap;
2630
2631 /* On a manual trigger, turn on the interrupts and put
2632 the clocks to sleep. They will recover themselves
2633 on the next event. For a hang, leave things as they
2634 are until recovery kicks in. */
2635
2636 if (manual) {
2637 kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
2638
2639 /* try to go into a sleep mode until the next event */
2640 kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP);
2641 kgsl_pwrctrl_sleep(device);
2642 }
2643
2644 KGSL_LOG_DUMP(device, "|%s| Dump Finished\n", device->name);
2645
2646 return 0;
2647}
2648EXPORT_SYMBOL(kgsl_postmortem_dump);
2649
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002650void kgsl_device_platform_remove(struct kgsl_device *device)
2651{
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002652 kgsl_device_snapshot_close(device);
2653
2654 kgsl_cffdump_close(device->id);
2655 kgsl_pwrctrl_uninit_sysfs(device);
2656
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002657 pm_qos_remove_request(&device->pm_qos_req_dma);
2658
2659 idr_destroy(&device->context_idr);
2660
2661 kgsl_sharedmem_free(&device->memstore);
2662
2663 kgsl_mmu_close(device);
2664
2665 if (device->work_queue) {
2666 destroy_workqueue(device->work_queue);
2667 device->work_queue = NULL;
2668 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002669 kgsl_pwrctrl_close(device);
2670
Jeremy Gebben4f5f0de2012-03-01 15:51:37 -07002671 _unregister_device(device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002672}
2673EXPORT_SYMBOL(kgsl_device_platform_remove);
2674
2675static int __devinit
2676kgsl_ptdata_init(void)
2677{
Jordan Crouse6d76c4d2012-03-26 09:50:43 -06002678 kgsl_driver.ptpool = kgsl_mmu_ptpool_init(kgsl_pagetable_count);
2679
Shubhraprakash Das767fdda2011-08-15 15:49:45 -06002680 if (!kgsl_driver.ptpool)
2681 return -ENOMEM;
2682 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002683}
2684
2685static void kgsl_core_exit(void)
2686{
Ranjhith Kalisamy4ad59e92012-05-31 19:15:11 +05302687 kgsl_mmu_ptpool_destroy(kgsl_driver.ptpool);
Shubhraprakash Das767fdda2011-08-15 15:49:45 -06002688 kgsl_driver.ptpool = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002689
Ranjhith Kalisamydad9df52012-06-01 17:05:13 +05302690 kgsl_drm_exit();
2691 kgsl_cffdump_destroy();
2692 kgsl_core_debugfs_close();
Ranjhith Kalisamydad9df52012-06-01 17:05:13 +05302693
Harsh Vardhan Dwivediefa6b012012-06-15 13:02:27 -06002694 /*
2695 * We call kgsl_sharedmem_uninit_sysfs() and device_unregister()
2696 * only if kgsl_driver.virtdev has been populated.
2697 * We check at least one member of kgsl_driver.virtdev to
2698 * see if it is not NULL (and thus, has been populated).
2699 */
2700 if (kgsl_driver.virtdev.class) {
2701 kgsl_sharedmem_uninit_sysfs();
2702 device_unregister(&kgsl_driver.virtdev);
2703 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002704
2705 if (kgsl_driver.class) {
2706 class_destroy(kgsl_driver.class);
2707 kgsl_driver.class = NULL;
2708 }
2709
Ranjhith Kalisamydad9df52012-06-01 17:05:13 +05302710 unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002711}
2712
2713static int __init kgsl_core_init(void)
2714{
2715 int result = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002716 /* alloc major and minor device numbers */
2717 result = alloc_chrdev_region(&kgsl_driver.major, 0, KGSL_DEVICE_MAX,
2718 KGSL_NAME);
2719 if (result < 0) {
2720 KGSL_CORE_ERR("alloc_chrdev_region failed err = %d\n", result);
2721 goto err;
2722 }
2723
2724 cdev_init(&kgsl_driver.cdev, &kgsl_fops);
2725 kgsl_driver.cdev.owner = THIS_MODULE;
2726 kgsl_driver.cdev.ops = &kgsl_fops;
2727 result = cdev_add(&kgsl_driver.cdev, MKDEV(MAJOR(kgsl_driver.major), 0),
2728 KGSL_DEVICE_MAX);
2729
2730 if (result) {
2731 KGSL_CORE_ERR("kgsl: cdev_add() failed, dev_num= %d,"
2732 " result= %d\n", kgsl_driver.major, result);
2733 goto err;
2734 }
2735
2736 kgsl_driver.class = class_create(THIS_MODULE, KGSL_NAME);
2737
2738 if (IS_ERR(kgsl_driver.class)) {
2739 result = PTR_ERR(kgsl_driver.class);
2740 KGSL_CORE_ERR("failed to create class %s", KGSL_NAME);
2741 goto err;
2742 }
2743
2744 /* Make a virtual device for managing core related things
2745 in sysfs */
2746 kgsl_driver.virtdev.class = kgsl_driver.class;
2747 dev_set_name(&kgsl_driver.virtdev, "kgsl");
2748 result = device_register(&kgsl_driver.virtdev);
2749 if (result) {
2750		KGSL_CORE_ERR("device_register failed\n");
2751 goto err;
2752 }
2753
2754 /* Make kobjects in the virtual device for storing statistics */
2755
2756 kgsl_driver.ptkobj =
2757 kobject_create_and_add("pagetables",
2758 &kgsl_driver.virtdev.kobj);
2759
2760 kgsl_driver.prockobj =
2761 kobject_create_and_add("proc",
2762 &kgsl_driver.virtdev.kobj);
2763
2764 kgsl_core_debugfs_init();
2765
2766 kgsl_sharedmem_init_sysfs();
2767 kgsl_cffdump_init();
2768
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002769 INIT_LIST_HEAD(&kgsl_driver.process_list);
2770
Shubhraprakash Das767fdda2011-08-15 15:49:45 -06002771 INIT_LIST_HEAD(&kgsl_driver.pagetable_list);
2772
2773 kgsl_mmu_set_mmutype(ksgl_mmu_type);
2774
2775 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_get_mmutype()) {
2776 result = kgsl_ptdata_init();
2777 if (result)
2778 goto err;
2779 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002780
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002781 return 0;
2782
2783err:
2784 kgsl_core_exit();
2785 return result;
2786}
2787
2788module_init(kgsl_core_init);
2789module_exit(kgsl_core_exit);
2790
2791MODULE_AUTHOR("Qualcomm Innovation Center, Inc.");
2792MODULE_DESCRIPTION("MSM GPU driver");
2793MODULE_LICENSE("GPL");