/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/fb.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/android_pmem.h>
#include <linux/vmalloc.h>
#include <linux/pm_runtime.h>
#include <linux/genlock.h>

#include <linux/ashmem.h>
#include <linux/major.h>
#include <linux/ion.h>
#include <mach/socinfo.h>

#include "kgsl.h"
#include "kgsl_debugfs.h"
#include "kgsl_cffdump.h"
#include "kgsl_log.h"
#include "kgsl_sharedmem.h"
#include "kgsl_device.h"
#include "kgsl_trace.h"

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "kgsl."

static int kgsl_pagetable_count = KGSL_PAGETABLE_COUNT;
static char *ksgl_mmu_type;
module_param_named(ptcount, kgsl_pagetable_count, int, 0);
MODULE_PARM_DESC(kgsl_pagetable_count,
"Minimum number of pagetables for KGSL to allocate at initialization time");
module_param_named(mmutype, ksgl_mmu_type, charp, 0);
MODULE_PARM_DESC(ksgl_mmu_type,
"Type of MMU to be used for graphics. Valid values are 'iommu' or 'gpummu' or 'nommu'");

static struct ion_client *kgsl_ion_client;

/**
 * kgsl_add_event - Add a new timestamp event for the KGSL device
 * @device - KGSL device for the new event
 * @ts - the timestamp to trigger the event on
 * @cb - callback function to call when the timestamp expires
 * @priv - private data for the specific event type
 * @owner - driver instance that owns this event
 *
 * @returns - 0 on success or error code on failure
 */

static int kgsl_add_event(struct kgsl_device *device, u32 ts,
	void (*cb)(struct kgsl_device *, void *, u32), void *priv,
	struct kgsl_device_private *owner)
{
	struct kgsl_event *event;
	struct list_head *n;
	unsigned int cur = device->ftbl->readtimestamp(device,
		KGSL_TIMESTAMP_RETIRED);

	if (cb == NULL)
		return -EINVAL;

	/* Check to see if the requested timestamp has already fired */

	if (timestamp_cmp(cur, ts) >= 0) {
		cb(device, priv, cur);
		return 0;
	}

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (event == NULL)
		return -ENOMEM;

	event->timestamp = ts;
	event->priv = priv;
	event->func = cb;
	event->owner = owner;

	/* Add the event in order to the list */

	for (n = device->events.next ; n != &device->events; n = n->next) {
		struct kgsl_event *e =
			list_entry(n, struct kgsl_event, list);

		if (timestamp_cmp(e->timestamp, ts) > 0) {
			list_add(&event->list, n->prev);
			break;
		}
	}

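	/*
	 * If no event with a later timestamp was found, the new event
	 * goes at the end of the list.
	 */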
	if (n == &device->events)
		list_add_tail(&event->list, &device->events);

	queue_work(device->work_queue, &device->ts_expired_ws);
	return 0;
}

/**
 * kgsl_cancel_events - Cancel all events for a process
 * @device - KGSL device for the events to cancel
 * @owner - driver instance that owns the events to cancel
 *
 */
static void kgsl_cancel_events(struct kgsl_device *device,
	struct kgsl_device_private *owner)
{
	struct kgsl_event *event, *event_tmp;
	unsigned int cur = device->ftbl->readtimestamp(device,
		KGSL_TIMESTAMP_RETIRED);

	list_for_each_entry_safe(event, event_tmp, &device->events, list) {
		if (event->owner != owner)
			continue;
		/*
		 * "cancel" the events by calling their callback.
		 * Currently, events are used for lock and memory
		 * management, so if the process is dying the right
		 * thing to do is release or free.
		 */
		if (event->func)
			event->func(device, event->priv, cur);

		list_del(&event->list);
		kfree(event);
	}
}

static inline struct kgsl_mem_entry *
kgsl_mem_entry_create(void)
{
	struct kgsl_mem_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);

	if (!entry)
		KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*entry));
	else
		kref_init(&entry->refcount);

	return entry;
}

void
kgsl_mem_entry_destroy(struct kref *kref)
{
	struct kgsl_mem_entry *entry = container_of(kref,
						    struct kgsl_mem_entry,
						    refcount);

	entry->priv->stats[entry->memtype].cur -= entry->memdesc.size;

	if (entry->memtype != KGSL_MEM_ENTRY_KERNEL)
		kgsl_driver.stats.mapped -= entry->memdesc.size;

	/*
	 * Ion takes care of freeing the sglist for us (how nice </sarcasm>) so
	 * unmap the dma before freeing the sharedmem so kgsl_sharedmem_free
	 * doesn't try to free it again
	 */

	if (entry->memtype == KGSL_MEM_ENTRY_ION) {
		ion_unmap_dma(kgsl_ion_client, entry->priv_data);
		entry->memdesc.sg = NULL;
	}

	kgsl_sharedmem_free(&entry->memdesc);

	switch (entry->memtype) {
	case KGSL_MEM_ENTRY_PMEM:
	case KGSL_MEM_ENTRY_ASHMEM:
		if (entry->priv_data)
			fput(entry->priv_data);
		break;
	case KGSL_MEM_ENTRY_ION:
		ion_free(kgsl_ion_client, entry->priv_data);
		break;
	}

	kfree(entry);
}
EXPORT_SYMBOL(kgsl_mem_entry_destroy);

static
void kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry,
				   struct kgsl_process_private *process)
{
	spin_lock(&process->mem_lock);
	list_add(&entry->list, &process->mem_list);
	spin_unlock(&process->mem_lock);

	entry->priv = process;
}

/* Allocate a new context id */

static struct kgsl_context *
kgsl_create_context(struct kgsl_device_private *dev_priv)
{
	struct kgsl_context *context;
	int ret, id;

	context = kzalloc(sizeof(*context), GFP_KERNEL);

	if (context == NULL)
		return NULL;

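	/*
	 * idr_get_new() returns -EAGAIN when the memory preallocated by
	 * idr_pre_get() has been used up, so keep retrying until an id is
	 * assigned or a real error occurs.
	 */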
	while (1) {
		if (idr_pre_get(&dev_priv->device->context_idr,
				GFP_KERNEL) == 0) {
			kfree(context);
			return NULL;
		}

		ret = idr_get_new(&dev_priv->device->context_idr,
				  context, &id);

		if (ret != -EAGAIN)
			break;
	}

	if (ret) {
		kfree(context);
		return NULL;
	}

	context->id = id;
	context->dev_priv = dev_priv;

	return context;
}

static void
kgsl_destroy_context(struct kgsl_device_private *dev_priv,
		     struct kgsl_context *context)
{
	int id;

	if (context == NULL)
		return;

	/* Fire a bug if the devctxt hasn't been freed */
	BUG_ON(context->devctxt);

	id = context->id;
	kfree(context);

	idr_remove(&dev_priv->device->context_idr, id);
}

static void kgsl_timestamp_expired(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
		ts_expired_ws);
	struct kgsl_event *event, *event_tmp;
	uint32_t ts_processed;

	mutex_lock(&device->mutex);

	/* get current EOP timestamp */
	ts_processed = device->ftbl->readtimestamp(device,
		KGSL_TIMESTAMP_RETIRED);

	/* Process expired events */
	list_for_each_entry_safe(event, event_tmp, &device->events, list) {
		if (timestamp_cmp(ts_processed, event->timestamp) < 0)
			break;

		if (event->func)
			event->func(device, event->priv, ts_processed);

		list_del(&event->list);
		kfree(event);
	}

	mutex_unlock(&device->mutex);
}

static void kgsl_check_idle_locked(struct kgsl_device *device)
{
	if (device->pwrctrl.nap_allowed == true &&
	    device->state == KGSL_STATE_ACTIVE &&
	    device->requested_state == KGSL_STATE_NONE) {
		kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
		if (kgsl_pwrctrl_sleep(device) != 0)
			mod_timer(&device->idle_timer,
				  jiffies +
				  device->pwrctrl.interval_timeout);
	}
}

static void kgsl_check_idle(struct kgsl_device *device)
{
	mutex_lock(&device->mutex);
	kgsl_check_idle_locked(device);
	mutex_unlock(&device->mutex);
}

struct kgsl_device *kgsl_get_device(int dev_idx)
{
	int i;
	struct kgsl_device *ret = NULL;

	mutex_lock(&kgsl_driver.devlock);

	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		if (kgsl_driver.devp[i] && kgsl_driver.devp[i]->id == dev_idx) {
			ret = kgsl_driver.devp[i];
			break;
		}
	}

	mutex_unlock(&kgsl_driver.devlock);
	return ret;
}
EXPORT_SYMBOL(kgsl_get_device);

static struct kgsl_device *kgsl_get_minor(int minor)
{
	struct kgsl_device *ret = NULL;

	if (minor < 0 || minor >= KGSL_DEVICE_MAX)
		return NULL;

	mutex_lock(&kgsl_driver.devlock);
	ret = kgsl_driver.devp[minor];
	mutex_unlock(&kgsl_driver.devlock);

	return ret;
}

int kgsl_register_ts_notifier(struct kgsl_device *device,
			      struct notifier_block *nb)
{
	BUG_ON(device == NULL);
	return atomic_notifier_chain_register(&device->ts_notifier_list,
					      nb);
}
EXPORT_SYMBOL(kgsl_register_ts_notifier);

int kgsl_unregister_ts_notifier(struct kgsl_device *device,
				struct notifier_block *nb)
{
	BUG_ON(device == NULL);
	return atomic_notifier_chain_unregister(&device->ts_notifier_list,
						nb);
}
EXPORT_SYMBOL(kgsl_unregister_ts_notifier);

int kgsl_check_timestamp(struct kgsl_device *device, unsigned int timestamp)
{
	unsigned int ts_processed;

	ts_processed = device->ftbl->readtimestamp(device,
		KGSL_TIMESTAMP_RETIRED);

	return (timestamp_cmp(ts_processed, timestamp) >= 0);
}
EXPORT_SYMBOL(kgsl_check_timestamp);

static int kgsl_suspend_device(struct kgsl_device *device, pm_message_t state)
{
	int status = -EINVAL;
	unsigned int nap_allowed_saved;
	struct kgsl_pwrscale_policy *policy_saved;

	if (!device)
		return -EINVAL;

	KGSL_PWR_WARN(device, "suspend start\n");

	mutex_lock(&device->mutex);
	nap_allowed_saved = device->pwrctrl.nap_allowed;
	device->pwrctrl.nap_allowed = false;
	policy_saved = device->pwrscale.policy;
	device->pwrscale.policy = NULL;
	kgsl_pwrctrl_request_state(device, KGSL_STATE_SUSPEND);
	/* Make sure no user process is waiting for a timestamp
	 * before suspending */
	if (device->active_cnt != 0) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->suspend_gate);
		mutex_lock(&device->mutex);
	}
	/* Don't let the timer wake us during suspended sleep. */
	del_timer_sync(&device->idle_timer);
	switch (device->state) {
	case KGSL_STATE_INIT:
		break;
	case KGSL_STATE_ACTIVE:
		/* Wait for the device to become idle */
		device->ftbl->idle(device, KGSL_TIMEOUT_DEFAULT);
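		/* fall through: once idle, take the same path as NAP/SLEEP */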
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
		/* Get the completion ready to be waited upon. */
		INIT_COMPLETION(device->hwaccess_gate);
		device->ftbl->suspend_context(device);
		device->ftbl->stop(device);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SUSPEND);
		break;
	case KGSL_STATE_SLUMBER:
		INIT_COMPLETION(device->hwaccess_gate);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SUSPEND);
		break;
	default:
		KGSL_PWR_ERR(device, "suspend fail, device %d\n",
			device->id);
		goto end;
	}
	kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
	device->pwrctrl.nap_allowed = nap_allowed_saved;
	device->pwrscale.policy = policy_saved;
	status = 0;

end:
	mutex_unlock(&device->mutex);
	KGSL_PWR_WARN(device, "suspend end\n");
	return status;
}

static int kgsl_resume_device(struct kgsl_device *device)
{
	int status = -EINVAL;

	if (!device)
		return -EINVAL;

	KGSL_PWR_WARN(device, "resume start\n");
	mutex_lock(&device->mutex);
	if (device->state == KGSL_STATE_SUSPEND) {
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
		status = 0;
		complete_all(&device->hwaccess_gate);
	}
	kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);

	mutex_unlock(&device->mutex);
	KGSL_PWR_WARN(device, "resume end\n");
	return status;
}

static int kgsl_suspend(struct device *dev)
{

	pm_message_t arg = {0};
	struct kgsl_device *device = dev_get_drvdata(dev);
	return kgsl_suspend_device(device, arg);
}

static int kgsl_resume(struct device *dev)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	return kgsl_resume_device(device);
}

static int kgsl_runtime_suspend(struct device *dev)
{
	return 0;
}

static int kgsl_runtime_resume(struct device *dev)
{
	return 0;
}

const struct dev_pm_ops kgsl_pm_ops = {
	.suspend = kgsl_suspend,
	.resume = kgsl_resume,
	.runtime_suspend = kgsl_runtime_suspend,
	.runtime_resume = kgsl_runtime_resume,
};
EXPORT_SYMBOL(kgsl_pm_ops);

void kgsl_early_suspend_driver(struct early_suspend *h)
{
	struct kgsl_device *device = container_of(h,
					struct kgsl_device, display_off);
	KGSL_PWR_WARN(device, "early suspend start\n");
	mutex_lock(&device->mutex);
	kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
	kgsl_pwrctrl_sleep(device);
	mutex_unlock(&device->mutex);
	KGSL_PWR_WARN(device, "early suspend end\n");
}
EXPORT_SYMBOL(kgsl_early_suspend_driver);

int kgsl_suspend_driver(struct platform_device *pdev,
			pm_message_t state)
{
	struct kgsl_device *device = dev_get_drvdata(&pdev->dev);
	return kgsl_suspend_device(device, state);
}
EXPORT_SYMBOL(kgsl_suspend_driver);

int kgsl_resume_driver(struct platform_device *pdev)
{
	struct kgsl_device *device = dev_get_drvdata(&pdev->dev);
	return kgsl_resume_device(device);
}
EXPORT_SYMBOL(kgsl_resume_driver);

void kgsl_late_resume_driver(struct early_suspend *h)
{
	struct kgsl_device *device = container_of(h,
					struct kgsl_device, display_off);
	KGSL_PWR_WARN(device, "late resume start\n");
	mutex_lock(&device->mutex);
	kgsl_pwrctrl_wake(device);
	device->pwrctrl.restore_slumber = 0;
	kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_TURBO);
	mutex_unlock(&device->mutex);
	kgsl_check_idle(device);
	KGSL_PWR_WARN(device, "late resume end\n");
}
EXPORT_SYMBOL(kgsl_late_resume_driver);

/* file operations */
static struct kgsl_process_private *
kgsl_get_process_private(struct kgsl_device_private *cur_dev_priv)
{
	struct kgsl_process_private *private;

	mutex_lock(&kgsl_driver.process_mutex);
	list_for_each_entry(private, &kgsl_driver.process_list, list) {
		if (private->pid == task_tgid_nr(current)) {
			private->refcnt++;
			goto out;
		}
	}

	/* no existing process private found for this dev_priv, create one */
	private = kzalloc(sizeof(struct kgsl_process_private), GFP_KERNEL);
	if (private == NULL) {
		KGSL_DRV_ERR(cur_dev_priv->device, "kzalloc(%d) failed\n",
			sizeof(struct kgsl_process_private));
		goto out;
	}

	spin_lock_init(&private->mem_lock);
	private->refcnt = 1;
	private->pid = task_tgid_nr(current);

	INIT_LIST_HEAD(&private->mem_list);

	if (kgsl_mmu_enabled()) {
		unsigned long pt_name;

		pt_name = task_tgid_nr(current);
		private->pagetable = kgsl_mmu_getpagetable(pt_name);
		if (private->pagetable == NULL) {
			kfree(private);
			private = NULL;
			goto out;
		}
	}

	list_add(&private->list, &kgsl_driver.process_list);

	kgsl_process_init_sysfs(private);

out:
	mutex_unlock(&kgsl_driver.process_mutex);
	return private;
}

static void
kgsl_put_process_private(struct kgsl_device *device,
			 struct kgsl_process_private *private)
{
	struct kgsl_mem_entry *entry = NULL;
	struct kgsl_mem_entry *entry_tmp = NULL;

	if (!private)
		return;

	mutex_lock(&kgsl_driver.process_mutex);

	if (--private->refcnt)
		goto unlock;

	kgsl_process_uninit_sysfs(private);

	list_del(&private->list);

	list_for_each_entry_safe(entry, entry_tmp, &private->mem_list, list) {
		list_del(&entry->list);
		kgsl_mem_entry_put(entry);
	}

	kgsl_mmu_putpagetable(private->pagetable);
	kfree(private);
unlock:
	mutex_unlock(&kgsl_driver.process_mutex);
}

static int kgsl_release(struct inode *inodep, struct file *filep)
{
	int result = 0;
	struct kgsl_device_private *dev_priv = filep->private_data;
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_device *device = dev_priv->device;
	struct kgsl_context *context;
	int next = 0;

	filep->private_data = NULL;

	mutex_lock(&device->mutex);
	kgsl_check_suspended(device);

	while (1) {
		context = idr_get_next(&device->context_idr, &next);
		if (context == NULL)
			break;

		if (context->dev_priv == dev_priv) {
			device->ftbl->drawctxt_destroy(device, context);
			kgsl_destroy_context(dev_priv, context);
		}

		next = next + 1;
	}

	device->open_count--;
	if (device->open_count == 0) {
		result = device->ftbl->stop(device);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
	}
	/* clean up any to-be-freed entries that belong to this
	 * process and this device
	 */
	kgsl_cancel_events(device, dev_priv);

	mutex_unlock(&device->mutex);
	kfree(dev_priv);

	kgsl_put_process_private(device, private);

	pm_runtime_put(device->parentdev);
	return result;
}

static int kgsl_open(struct inode *inodep, struct file *filep)
{
	int result;
	struct kgsl_device_private *dev_priv;
	struct kgsl_device *device;
	unsigned int minor = iminor(inodep);

	device = kgsl_get_minor(minor);
	BUG_ON(device == NULL);

	if (filep->f_flags & O_EXCL) {
		KGSL_DRV_ERR(device, "O_EXCL not allowed\n");
		return -EBUSY;
	}

	result = pm_runtime_get_sync(device->parentdev);
	if (result < 0) {
		KGSL_DRV_ERR(device,
			"Runtime PM: Unable to wake up the device, rc = %d\n",
			result);
		return result;
	}
	result = 0;

	dev_priv = kzalloc(sizeof(struct kgsl_device_private), GFP_KERNEL);
	if (dev_priv == NULL) {
		KGSL_DRV_ERR(device, "kzalloc failed(%d)\n",
			sizeof(struct kgsl_device_private));
		result = -ENOMEM;
		goto err_pmruntime;
	}

	dev_priv->device = device;
	filep->private_data = dev_priv;

	/* Get file (per process) private struct */
	dev_priv->process_priv = kgsl_get_process_private(dev_priv);
	if (dev_priv->process_priv == NULL) {
		result = -ENOMEM;
		goto err_freedevpriv;
	}

	mutex_lock(&device->mutex);
	kgsl_check_suspended(device);

	if (device->open_count == 0) {
		result = device->ftbl->start(device, true);

		if (result) {
			mutex_unlock(&device->mutex);
			goto err_putprocess;
		}
		kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
	}
	device->open_count++;
	mutex_unlock(&device->mutex);

	KGSL_DRV_INFO(device, "Initialized %s: mmu=%s pagetable_count=%d\n",
		device->name, kgsl_mmu_enabled() ? "on" : "off",
		kgsl_pagetable_count);

	return result;

err_putprocess:
	kgsl_put_process_private(device, dev_priv->process_priv);
err_freedevpriv:
	filep->private_data = NULL;
	kfree(dev_priv);
err_pmruntime:
	pm_runtime_put(device->parentdev);
	return result;
}


/* call with private->mem_lock locked */
static struct kgsl_mem_entry *
kgsl_sharedmem_find(struct kgsl_process_private *private, unsigned int gpuaddr)
{
	struct kgsl_mem_entry *entry = NULL, *result = NULL;

	BUG_ON(private == NULL);

	gpuaddr &= PAGE_MASK;

	list_for_each_entry(entry, &private->mem_list, list) {
		if (entry->memdesc.gpuaddr == gpuaddr) {
			result = entry;
			break;
		}
	}
	return result;
}

/* call with private->mem_lock locked */
struct kgsl_mem_entry *
kgsl_sharedmem_find_region(struct kgsl_process_private *private,
	unsigned int gpuaddr, size_t size)
{
	struct kgsl_mem_entry *entry = NULL, *result = NULL;

	BUG_ON(private == NULL);

	list_for_each_entry(entry, &private->mem_list, list) {
		if (kgsl_gpuaddr_in_memdesc(&entry->memdesc, gpuaddr, size)) {
			result = entry;
			break;
		}
	}

	return result;
}
EXPORT_SYMBOL(kgsl_sharedmem_find_region);

/* call all ioctl sub functions with driver locked */
static long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv,
					  unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_device_getproperty *param = data;

	switch (param->type) {
	case KGSL_PROP_VERSION:
	{
		struct kgsl_version version;
		if (param->sizebytes != sizeof(version)) {
			result = -EINVAL;
			break;
		}

		version.drv_major = KGSL_VERSION_MAJOR;
		version.drv_minor = KGSL_VERSION_MINOR;
		version.dev_major = dev_priv->device->ver_major;
		version.dev_minor = dev_priv->device->ver_minor;

		if (copy_to_user(param->value, &version, sizeof(version)))
			result = -EFAULT;

		break;
	}
	case KGSL_PROP_GPU_RESET_STAT:
	{
		/* Return reset status of given context and clear it */
		uint32_t id;
		struct kgsl_context *context;

		if (param->sizebytes != sizeof(unsigned int)) {
			result = -EINVAL;
			break;
		}
		/* We expect the value passed in to contain the context id */
		if (copy_from_user(&id, param->value,
			sizeof(unsigned int))) {
			result = -EFAULT;
			break;
		}
		context = kgsl_find_context(dev_priv, id);
		if (!context) {
			result = -EINVAL;
			break;
		}
		/*
		 * Copy the reset status to value which also serves as
		 * the out parameter
		 */
		if (copy_to_user(param->value, &(context->reset_status),
			sizeof(unsigned int))) {
			result = -EFAULT;
			break;
		}
		/* Clear reset status once its been queried */
		context->reset_status = KGSL_CTX_STAT_NO_ERROR;
		break;
	}
	default:
		result = dev_priv->device->ftbl->getproperty(
			dev_priv->device, param->type,
			param->value, param->sizebytes);
	}


	return result;
}

static long kgsl_ioctl_device_waittimestamp(struct kgsl_device_private
					    *dev_priv, unsigned int cmd,
					    void *data)
{
	int result = 0;
	struct kgsl_device_waittimestamp *param = data;

	/* Set the active count so that suspend doesn't do the
	   wrong thing */

	dev_priv->device->active_cnt++;

	trace_kgsl_waittimestamp_entry(dev_priv->device, param);

	result = dev_priv->device->ftbl->waittimestamp(dev_priv->device,
					param->timestamp,
					param->timeout);

	trace_kgsl_waittimestamp_exit(dev_priv->device, result);

	/* Fire off any pending suspend operations that are in flight */

	INIT_COMPLETION(dev_priv->device->suspend_gate);
	dev_priv->device->active_cnt--;
	complete(&dev_priv->device->suspend_gate);

	return result;
}

static bool check_ibdesc(struct kgsl_device_private *dev_priv,
			 struct kgsl_ibdesc *ibdesc, unsigned int numibs,
			 bool parse)
{
	bool result = true;
	unsigned int i;
	for (i = 0; i < numibs; i++) {
		struct kgsl_mem_entry *entry;
		spin_lock(&dev_priv->process_priv->mem_lock);
		entry = kgsl_sharedmem_find_region(dev_priv->process_priv,
			ibdesc[i].gpuaddr, ibdesc[i].sizedwords * sizeof(uint));
		spin_unlock(&dev_priv->process_priv->mem_lock);
		if (entry == NULL) {
			KGSL_DRV_ERR(dev_priv->device,
				"invalid cmd buffer gpuaddr %08x "
				"sizedwords %d\n", ibdesc[i].gpuaddr,
				ibdesc[i].sizedwords);
			result = false;
			break;
		}

		if (parse && !kgsl_cffdump_parse_ibs(dev_priv, &entry->memdesc,
			ibdesc[i].gpuaddr, ibdesc[i].sizedwords, true)) {
			KGSL_DRV_ERR(dev_priv->device,
				"invalid cmd buffer gpuaddr %08x "
				"sizedwords %d numibs %d/%d\n",
				ibdesc[i].gpuaddr,
				ibdesc[i].sizedwords, i+1, numibs);
			result = false;
			break;
		}
	}
	return result;
}

static long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
				      unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_ringbuffer_issueibcmds *param = data;
	struct kgsl_ibdesc *ibdesc;
	struct kgsl_context *context;

#ifdef CONFIG_MSM_KGSL_DRM
	kgsl_gpu_mem_flush(DRM_KGSL_GEM_CACHE_OP_TO_DEV);
#endif

	context = kgsl_find_context(dev_priv, param->drawctxt_id);
	if (context == NULL) {
		result = -EINVAL;
		KGSL_DRV_ERR(dev_priv->device,
			"invalid drawctxt drawctxt_id %d\n",
			param->drawctxt_id);
		goto done;
	}

	if (param->flags & KGSL_CONTEXT_SUBMIT_IB_LIST) {
		KGSL_DRV_INFO(dev_priv->device,
			"Using IB list mode for ib submission, numibs: %d\n",
			param->numibs);
		if (!param->numibs) {
			KGSL_DRV_ERR(dev_priv->device,
				"Invalid numibs as parameter: %d\n",
				param->numibs);
			result = -EINVAL;
			goto done;
		}

		ibdesc = kzalloc(sizeof(struct kgsl_ibdesc) * param->numibs,
				GFP_KERNEL);
		if (!ibdesc) {
			KGSL_MEM_ERR(dev_priv->device,
				"kzalloc(%d) failed\n",
				sizeof(struct kgsl_ibdesc) * param->numibs);
			result = -ENOMEM;
			goto done;
		}

		if (copy_from_user(ibdesc, (void *)param->ibdesc_addr,
				sizeof(struct kgsl_ibdesc) * param->numibs)) {
			result = -EFAULT;
			KGSL_DRV_ERR(dev_priv->device,
				"copy_from_user failed\n");
			goto free_ibdesc;
		}
	} else {
		KGSL_DRV_INFO(dev_priv->device,
			"Using single IB submission mode for ib submission\n");
		/* If user space driver is still using the old mode of
		 * submitting single ib then we need to support that as well */
		ibdesc = kzalloc(sizeof(struct kgsl_ibdesc), GFP_KERNEL);
		if (!ibdesc) {
			KGSL_MEM_ERR(dev_priv->device,
				"kzalloc(%d) failed\n",
				sizeof(struct kgsl_ibdesc));
			result = -ENOMEM;
			goto done;
		}
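		/*
		 * Legacy single-IB layout: ibdesc_addr holds the GPU address
		 * of the IB and numibs holds its size in dwords.
		 */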
		ibdesc[0].gpuaddr = param->ibdesc_addr;
		ibdesc[0].sizedwords = param->numibs;
		param->numibs = 1;
	}

	if (!check_ibdesc(dev_priv, ibdesc, param->numibs, true)) {
		KGSL_DRV_ERR(dev_priv->device, "bad ibdesc");
		result = -EINVAL;
		goto free_ibdesc;
	}

	result = dev_priv->device->ftbl->issueibcmds(dev_priv,
					     context,
					     ibdesc,
					     param->numibs,
					     &param->timestamp,
					     param->flags);

	trace_kgsl_issueibcmds(dev_priv->device, param, result);

	if (result != 0)
		goto free_ibdesc;

	/* this is a check to try to detect if a command buffer was freed
	 * during issueibcmds().
	 */
	if (!check_ibdesc(dev_priv, ibdesc, param->numibs, false)) {
		KGSL_DRV_ERR(dev_priv->device, "bad ibdesc AFTER issue");
		result = -EINVAL;
		goto free_ibdesc;
	}

free_ibdesc:
	kfree(ibdesc);
done:

#ifdef CONFIG_MSM_KGSL_DRM
	kgsl_gpu_mem_flush(DRM_KGSL_GEM_CACHE_OP_FROM_DEV);
#endif

	return result;
}

static long kgsl_ioctl_cmdstream_readtimestamp(struct kgsl_device_private
						*dev_priv, unsigned int cmd,
						void *data)
{
	struct kgsl_cmdstream_readtimestamp *param = data;

	param->timestamp =
		dev_priv->device->ftbl->readtimestamp(dev_priv->device,
		param->type);

	trace_kgsl_readtimestamp(dev_priv->device, param);

	return 0;
}

static void kgsl_freemem_event_cb(struct kgsl_device *device,
	void *priv, u32 timestamp)
{
	struct kgsl_mem_entry *entry = priv;
	spin_lock(&entry->priv->mem_lock);
	list_del(&entry->list);
	spin_unlock(&entry->priv->mem_lock);
	kgsl_mem_entry_put(entry);
}

static long kgsl_ioctl_cmdstream_freememontimestamp(struct kgsl_device_private
						    *dev_priv, unsigned int cmd,
						    void *data)
{
	int result = 0;
	struct kgsl_cmdstream_freememontimestamp *param = data;
	struct kgsl_mem_entry *entry = NULL;

	spin_lock(&dev_priv->process_priv->mem_lock);
	entry = kgsl_sharedmem_find(dev_priv->process_priv, param->gpuaddr);
	spin_unlock(&dev_priv->process_priv->mem_lock);

	if (entry) {
		result = kgsl_add_event(dev_priv->device, param->timestamp,
					kgsl_freemem_event_cb, entry, dev_priv);
	} else {
		KGSL_DRV_ERR(dev_priv->device,
			"invalid gpuaddr %08x\n", param->gpuaddr);
		result = -EINVAL;
	}

	return result;
}

static long kgsl_ioctl_drawctxt_create(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_drawctxt_create *param = data;
	struct kgsl_context *context = NULL;

	context = kgsl_create_context(dev_priv);

	if (context == NULL) {
		result = -ENOMEM;
		goto done;
	}

	if (dev_priv->device->ftbl->drawctxt_create)
		result = dev_priv->device->ftbl->drawctxt_create(
			dev_priv->device, dev_priv->process_priv->pagetable,
			context, param->flags);

	param->drawctxt_id = context->id;

done:
	if (result && context)
		kgsl_destroy_context(dev_priv, context);

	return result;
}

static long kgsl_ioctl_drawctxt_destroy(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_drawctxt_destroy *param = data;
	struct kgsl_context *context;

	context = kgsl_find_context(dev_priv, param->drawctxt_id);

	if (context == NULL) {
		result = -EINVAL;
		goto done;
	}

	if (dev_priv->device->ftbl->drawctxt_destroy)
		dev_priv->device->ftbl->drawctxt_destroy(dev_priv->device,
			context);

	kgsl_destroy_context(dev_priv, context);

done:
	return result;
}

static long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_sharedmem_free *param = data;
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_mem_entry *entry = NULL;

	spin_lock(&private->mem_lock);
	entry = kgsl_sharedmem_find(private, param->gpuaddr);
	if (entry)
		list_del(&entry->list);
	spin_unlock(&private->mem_lock);

	if (entry) {
		kgsl_mem_entry_put(entry);
	} else {
		KGSL_CORE_ERR("invalid gpuaddr %08x\n", param->gpuaddr);
		result = -EINVAL;
	}

	return result;
}

static struct vm_area_struct *kgsl_get_vma_from_start_addr(unsigned int addr)
{
	struct vm_area_struct *vma;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	up_read(&current->mm->mmap_sem);
	if (!vma)
		KGSL_CORE_ERR("find_vma(%x) failed\n", addr);

	return vma;
}

static long
kgsl_ioctl_sharedmem_from_vmalloc(struct kgsl_device_private *dev_priv,
				  unsigned int cmd, void *data)
{
	int result = 0, len = 0;
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_sharedmem_from_vmalloc *param = data;
	struct kgsl_mem_entry *entry = NULL;
	struct vm_area_struct *vma;

	if (!kgsl_mmu_enabled())
		return -ENODEV;

	if (!param->hostptr) {
		KGSL_CORE_ERR("invalid hostptr %x\n", param->hostptr);
		result = -EINVAL;
		goto error;
	}

	vma = kgsl_get_vma_from_start_addr(param->hostptr);
	if (!vma) {
		result = -EINVAL;
		goto error;
	}

	/*
	 * If the user specified a length, use it, otherwise try to
	 * infer the length from the vma region
	 */
	if (param->gpuaddr != 0) {
		len = param->gpuaddr;
	} else {
		/*
		 * For this to work, we have to assume the VMA region is only
		 * for this single allocation. If it isn't, then bail out
		 */
		if (vma->vm_pgoff || (param->hostptr != vma->vm_start)) {
			KGSL_CORE_ERR("VMA region does not match hostaddr\n");
			result = -EINVAL;
			goto error;
		}

		len = vma->vm_end - vma->vm_start;
	}

	/* Make sure it fits */
	if (len == 0 || param->hostptr + len > vma->vm_end) {
		KGSL_CORE_ERR("Invalid memory allocation length %d\n", len);
		result = -EINVAL;
		goto error;
	}

	entry = kgsl_mem_entry_create();
	if (entry == NULL) {
		result = -ENOMEM;
		goto error;
	}

	result = kgsl_sharedmem_vmalloc_user(&entry->memdesc,
					     private->pagetable, len,
					     param->flags);
	if (result != 0)
		goto error_free_entry;

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	result = remap_vmalloc_range(vma, (void *) entry->memdesc.hostptr, 0);
	if (result) {
		KGSL_CORE_ERR("remap_vmalloc_range failed: %d\n", result);
		goto error_free_vmalloc;
	}

	param->gpuaddr = entry->memdesc.gpuaddr;

	entry->memtype = KGSL_MEM_ENTRY_KERNEL;

	kgsl_mem_entry_attach_process(entry, private);

	/* Process specific statistics */
	kgsl_process_add_stats(private, entry->memtype, len);

	kgsl_check_idle(dev_priv->device);
	return 0;

error_free_vmalloc:
	kgsl_sharedmem_free(&entry->memdesc);

error_free_entry:
	kfree(entry);

error:
	kgsl_check_idle(dev_priv->device);
	return result;
}

static inline int _check_region(unsigned long start, unsigned long size,
				uint64_t len)
{
	uint64_t end = ((uint64_t) start) + size;
	return (end > len);
}

static int kgsl_get_phys_file(int fd, unsigned long *start, unsigned long *len,
			      unsigned long *vstart, struct file **filep)
{
	struct file *fbfile;
	int ret = 0;
	dev_t rdev;
	struct fb_info *info;

	*filep = NULL;
#ifdef CONFIG_ANDROID_PMEM
	if (!get_pmem_file(fd, start, vstart, len, filep))
		return 0;
#endif

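	/* Not a PMEM buffer; see if the fd refers to a framebuffer device */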
	fbfile = fget(fd);
	if (fbfile == NULL) {
		KGSL_CORE_ERR("fget failed\n");
		return -1;
	}

	rdev = fbfile->f_dentry->d_inode->i_rdev;
	info = MAJOR(rdev) == FB_MAJOR ? registered_fb[MINOR(rdev)] : NULL;
	if (info) {
		*start = info->fix.smem_start;
		*len = info->fix.smem_len;
		*vstart = (unsigned long)__va(info->fix.smem_start);
		ret = 0;
	} else {
		KGSL_CORE_ERR("framebuffer minor %d not found\n",
			      MINOR(rdev));
		ret = -1;
	}

	fput(fbfile);

	return ret;
}

static int kgsl_setup_phys_file(struct kgsl_mem_entry *entry,
				struct kgsl_pagetable *pagetable,
				unsigned int fd, unsigned int offset,
				size_t size)
{
	int ret;
	unsigned long phys, virt, len;
	struct file *filep;

	ret = kgsl_get_phys_file(fd, &phys, &len, &virt, &filep);
	if (ret)
		return ret;

	if (phys == 0) {
		ret = -EINVAL;
		goto err;
	}

	if (offset >= len) {
		ret = -EINVAL;
		goto err;
	}

	if (size == 0)
		size = len;

	/* Adjust the size of the region to account for the offset */
	size += offset & ~PAGE_MASK;

	size = ALIGN(size, PAGE_SIZE);

	if (_check_region(offset & PAGE_MASK, size, len)) {
		KGSL_CORE_ERR("Offset (%ld) + size (%d) is larger "
			      "than pmem region length %ld\n",
			      offset & PAGE_MASK, size, len);
		ret = -EINVAL;
		goto err;

	}

	entry->priv_data = filep;

	entry->memdesc.pagetable = pagetable;
	entry->memdesc.size = size;
	entry->memdesc.physaddr = phys + (offset & PAGE_MASK);
	entry->memdesc.hostptr = (void *) (virt + (offset & PAGE_MASK));

	ret = memdesc_sg_phys(&entry->memdesc,
		phys + (offset & PAGE_MASK), size);
	if (ret)
		goto err;

	return 0;
err:
#ifdef CONFIG_ANDROID_PMEM
	put_pmem_file(filep);
#endif
	return ret;
}

static int memdesc_sg_virt(struct kgsl_memdesc *memdesc,
	void *addr, int size)
{
	int i;
	int sglen = PAGE_ALIGN(size) / PAGE_SIZE;
	unsigned long paddr = (unsigned long) addr;

	memdesc->sg = vmalloc(sglen * sizeof(struct scatterlist));
	if (memdesc->sg == NULL)
		return -ENOMEM;

	memdesc->sglen = sglen;
	sg_init_table(memdesc->sg, sglen);

	spin_lock(&current->mm->page_table_lock);

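	/*
	 * Walk the process page tables and add each page backing the
	 * user address range to the scatterlist.
	 */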
	for (i = 0; i < sglen; i++, paddr += PAGE_SIZE) {
		struct page *page;
		pmd_t *ppmd;
		pte_t *ppte;
		pgd_t *ppgd = pgd_offset(current->mm, paddr);

		if (pgd_none(*ppgd) || pgd_bad(*ppgd))
			goto err;

		ppmd = pmd_offset(ppgd, paddr);
		if (pmd_none(*ppmd) || pmd_bad(*ppmd))
			goto err;

		ppte = pte_offset_map(ppmd, paddr);
		if (ppte == NULL)
			goto err;

		page = pfn_to_page(pte_pfn(*ppte));
		if (!page)
			goto err;

		sg_set_page(&memdesc->sg[i], page, PAGE_SIZE, 0);
		pte_unmap(ppte);
	}

	spin_unlock(&current->mm->page_table_lock);

	return 0;

err:
	spin_unlock(&current->mm->page_table_lock);
	vfree(memdesc->sg);
	memdesc->sg = NULL;

	return -EINVAL;
}

static int kgsl_setup_hostptr(struct kgsl_mem_entry *entry,
			      struct kgsl_pagetable *pagetable,
			      void *hostptr, unsigned int offset,
			      size_t size)
{
	struct vm_area_struct *vma;
	unsigned int len;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, (unsigned int) hostptr);
	up_read(&current->mm->mmap_sem);

	if (!vma) {
		KGSL_CORE_ERR("find_vma(%p) failed\n", hostptr);
		return -EINVAL;
	}

	/* We don't necessarily start at vma->vm_start */
	len = vma->vm_end - (unsigned long) hostptr;

	if (offset >= len)
		return -EINVAL;

	if (!KGSL_IS_PAGE_ALIGNED((unsigned long) hostptr) ||
	    !KGSL_IS_PAGE_ALIGNED(len)) {
		KGSL_CORE_ERR("user address len(%u) "
			      "and start(%p) must be page "
			      "aligned\n", len, hostptr);
		return -EINVAL;
	}

	if (size == 0)
		size = len;

	/* Adjust the size of the region to account for the offset */
	size += offset & ~PAGE_MASK;

	size = ALIGN(size, PAGE_SIZE);

	if (_check_region(offset & PAGE_MASK, size, len)) {
		KGSL_CORE_ERR("Offset (%ld) + size (%d) is larger "
			      "than region length %d\n",
			      offset & PAGE_MASK, size, len);
		return -EINVAL;
	}

	entry->memdesc.pagetable = pagetable;
	entry->memdesc.size = size;
	entry->memdesc.hostptr = hostptr + (offset & PAGE_MASK);

	return memdesc_sg_virt(&entry->memdesc,
		hostptr + (offset & PAGE_MASK), size);
}

#ifdef CONFIG_ASHMEM
static int kgsl_setup_ashmem(struct kgsl_mem_entry *entry,
			     struct kgsl_pagetable *pagetable,
			     int fd, void *hostptr, size_t size)
{
	int ret;
	struct vm_area_struct *vma;
	struct file *filep, *vmfile;
	unsigned long len;
	unsigned int hostaddr = (unsigned int) hostptr;

	vma = kgsl_get_vma_from_start_addr(hostaddr);
	if (vma == NULL)
		return -EINVAL;

	if (vma->vm_pgoff || vma->vm_start != hostaddr) {
		KGSL_CORE_ERR("Invalid vma region\n");
		return -EINVAL;
	}

	len = vma->vm_end - vma->vm_start;

	if (size == 0)
		size = len;

	if (size != len) {
		KGSL_CORE_ERR("Invalid size %d for vma region %p\n",
			      size, hostptr);
		return -EINVAL;
	}

	ret = get_ashmem_file(fd, &filep, &vmfile, &len);

	if (ret) {
		KGSL_CORE_ERR("get_ashmem_file failed\n");
		return ret;
	}

	if (vmfile != vma->vm_file) {
		KGSL_CORE_ERR("ashmem shmem file does not match vma\n");
		ret = -EINVAL;
		goto err;
	}

	entry->priv_data = filep;
	entry->memdesc.pagetable = pagetable;
	entry->memdesc.size = ALIGN(size, PAGE_SIZE);
	entry->memdesc.hostptr = hostptr;

	ret = memdesc_sg_virt(&entry->memdesc, hostptr, size);
	if (ret)
		goto err;

	return 0;

err:
	put_ashmem_file(filep);
	return ret;
}
#else
static int kgsl_setup_ashmem(struct kgsl_mem_entry *entry,
			     struct kgsl_pagetable *pagetable,
			     int fd, void *hostptr, size_t size)
{
	return -EINVAL;
}
#endif

static int kgsl_setup_ion(struct kgsl_mem_entry *entry,
		struct kgsl_pagetable *pagetable, int fd)
{
	struct ion_handle *handle;
	struct scatterlist *s;
	unsigned long flags;

	if (kgsl_ion_client == NULL) {
		kgsl_ion_client = msm_ion_client_create(UINT_MAX, KGSL_NAME);
		if (kgsl_ion_client == NULL)
			return -ENODEV;
	}

	handle = ion_import_fd(kgsl_ion_client, fd);
	if (IS_ERR_OR_NULL(handle))
		return PTR_ERR(handle);

	entry->memtype = KGSL_MEM_ENTRY_ION;
	entry->priv_data = handle;
	entry->memdesc.pagetable = pagetable;
	entry->memdesc.size = 0;

	if (ion_handle_get_flags(kgsl_ion_client, handle, &flags))
		goto err;

	entry->memdesc.sg = ion_map_dma(kgsl_ion_client, handle, flags);

	if (IS_ERR_OR_NULL(entry->memdesc.sg))
		goto err;

	/* Calculate the size of the memdesc from the sglist */

	entry->memdesc.sglen = 0;

	for (s = entry->memdesc.sg; s != NULL; s = sg_next(s)) {
		entry->memdesc.size += s->length;
		entry->memdesc.sglen++;
	}

	return 0;
err:
	ion_free(kgsl_ion_client, handle);
	return -ENOMEM;
}

static long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
				     unsigned int cmd, void *data)
{
	int result = -EINVAL;
	struct kgsl_map_user_mem *param = data;
	struct kgsl_mem_entry *entry = NULL;
	struct kgsl_process_private *private = dev_priv->process_priv;
	enum kgsl_user_mem_type memtype;

	entry = kgsl_mem_entry_create();

	if (entry == NULL)
		return -ENOMEM;

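	/*
	 * Requests that arrive with the older, smaller
	 * struct kgsl_sharedmem_from_pmem layout carry no memtype field,
	 * so treat them as PMEM.
	 */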
	if (_IOC_SIZE(cmd) == sizeof(struct kgsl_sharedmem_from_pmem))
		memtype = KGSL_USER_MEM_TYPE_PMEM;
	else
		memtype = param->memtype;

	switch (memtype) {
	case KGSL_USER_MEM_TYPE_PMEM:
		if (param->fd == 0 || param->len == 0)
			break;

		result = kgsl_setup_phys_file(entry, private->pagetable,
					      param->fd, param->offset,
					      param->len);
		entry->memtype = KGSL_MEM_ENTRY_PMEM;
		break;

	case KGSL_USER_MEM_TYPE_ADDR:
		if (!kgsl_mmu_enabled()) {
			KGSL_DRV_ERR(dev_priv->device,
				"Cannot map paged memory with the "
				"MMU disabled\n");
			break;
		}

		if (param->hostptr == 0)
			break;

		result = kgsl_setup_hostptr(entry, private->pagetable,
					    (void *) param->hostptr,
					    param->offset, param->len);
		entry->memtype = KGSL_MEM_ENTRY_USER;
		break;

	case KGSL_USER_MEM_TYPE_ASHMEM:
		if (!kgsl_mmu_enabled()) {
			KGSL_DRV_ERR(dev_priv->device,
				"Cannot map paged memory with the "
				"MMU disabled\n");
			break;
		}

		if (param->hostptr == 0)
			break;

		result = kgsl_setup_ashmem(entry, private->pagetable,
					   param->fd, (void *) param->hostptr,
					   param->len);

		entry->memtype = KGSL_MEM_ENTRY_ASHMEM;
		break;
	case KGSL_USER_MEM_TYPE_ION:
		result = kgsl_setup_ion(entry, private->pagetable,
					param->fd);
		break;
	default:
		KGSL_CORE_ERR("Invalid memory type: %x\n", memtype);
		break;
	}

	if (result)
		goto error;

	result = kgsl_mmu_map(private->pagetable,
			      &entry->memdesc,
			      GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);

	if (result)
		goto error_put_file_ptr;

	/* Adjust the returned value for a non 4k aligned offset */
	param->gpuaddr = entry->memdesc.gpuaddr + (param->offset & ~PAGE_MASK);

	KGSL_STATS_ADD(param->len, kgsl_driver.stats.mapped,
		       kgsl_driver.stats.mapped_max);

	kgsl_process_add_stats(private, entry->memtype, param->len);

	kgsl_mem_entry_attach_process(entry, private);

	kgsl_check_idle(dev_priv->device);
	return result;

error_put_file_ptr:
	if (entry->priv_data)
		fput(entry->priv_data);

error:
	kfree(entry);
	kgsl_check_idle(dev_priv->device);
	return result;
}

/* This function flushes a graphics memory allocation from CPU cache
 * when caching is enabled with MMU */
static long
kgsl_ioctl_sharedmem_flush_cache(struct kgsl_device_private *dev_priv,
				 unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_mem_entry *entry;
	struct kgsl_sharedmem_free *param = data;
	struct kgsl_process_private *private = dev_priv->process_priv;

	spin_lock(&private->mem_lock);
	entry = kgsl_sharedmem_find(private, param->gpuaddr);
	if (!entry) {
		KGSL_CORE_ERR("invalid gpuaddr %08x\n", param->gpuaddr);
		result = -EINVAL;
		goto done;
	}
	if (!entry->memdesc.hostptr) {
		KGSL_CORE_ERR("invalid hostptr with gpuaddr %08x\n",
			param->gpuaddr);
		goto done;
	}

	kgsl_cache_range_op(&entry->memdesc, KGSL_CACHE_OP_CLEAN);
done:
	spin_unlock(&private->mem_lock);
	return result;
}

static long
kgsl_ioctl_gpumem_alloc(struct kgsl_device_private *dev_priv,
			unsigned int cmd, void *data)
{
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_gpumem_alloc *param = data;
	struct kgsl_mem_entry *entry;
	int result;

	entry = kgsl_mem_entry_create();
	if (entry == NULL)
		return -ENOMEM;

	result = kgsl_allocate_user(&entry->memdesc, private->pagetable,
		param->size, param->flags);

	if (result == 0) {
		entry->memtype = KGSL_MEM_ENTRY_KERNEL;
		kgsl_mem_entry_attach_process(entry, private);
		param->gpuaddr = entry->memdesc.gpuaddr;

		kgsl_process_add_stats(private, entry->memtype, param->size);
	} else
		kfree(entry);

	kgsl_check_idle(dev_priv->device);
	return result;
}

static long kgsl_ioctl_cff_syncmem(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_cff_syncmem *param = data;
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_mem_entry *entry = NULL;

	spin_lock(&private->mem_lock);
	entry = kgsl_sharedmem_find_region(private, param->gpuaddr, param->len);
	if (entry)
		kgsl_cffdump_syncmem(dev_priv, &entry->memdesc, param->gpuaddr,
				     param->len, true);
	else
		result = -EINVAL;
	spin_unlock(&private->mem_lock);
	return result;
}

static long kgsl_ioctl_cff_user_event(struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_cff_user_event *param = data;

	kgsl_cffdump_user_event(param->cff_opcode, param->op1, param->op2,
			param->op3, param->op4, param->op5);

	return result;
}

#ifdef CONFIG_GENLOCK
struct kgsl_genlock_event_priv {
	struct genlock_handle *handle;
	struct genlock *lock;
};

/**
 * kgsl_genlock_event_cb - Event callback for a genlock timestamp event
 * @device - The KGSL device that expired the timestamp
 * @priv - private data for the event
 * @timestamp - the timestamp that triggered the event
 *
 * Release a genlock lock following the expiration of a timestamp
 */

static void kgsl_genlock_event_cb(struct kgsl_device *device,
	void *priv, u32 timestamp)
{
	struct kgsl_genlock_event_priv *ev = priv;
	int ret;

	ret = genlock_lock(ev->handle, GENLOCK_UNLOCK, 0, 0);
	if (ret)
		KGSL_CORE_ERR("Error while unlocking genlock: %d\n", ret);

	genlock_put_handle(ev->handle);

	kfree(ev);
}

1788/**
 1789 * kgsl_add_genlock_event - Create a new genlock event
1790 * @device - KGSL device to create the event on
1791 * @timestamp - Timestamp to trigger the event
1792 * @data - User space buffer containing struct kgsl_genlock_event_priv
1793 * @len - length of the userspace buffer
Jeremy Gebbenfd87f9a2012-02-10 07:06:09 -07001794 * @owner - driver instance that owns this event
Jordan Croused4bc9d22011-11-17 13:39:21 -07001795 * @returns 0 on success or error code on failure
1796 *
 1797 * Attach to a genlock handle and register an event to release the
1798 * genlock lock when the timestamp expires
1799 */
1800
1801static int kgsl_add_genlock_event(struct kgsl_device *device,
Jeremy Gebbenfd87f9a2012-02-10 07:06:09 -07001802 u32 timestamp, void __user *data, int len,
1803 struct kgsl_device_private *owner)
Jordan Croused4bc9d22011-11-17 13:39:21 -07001804{
1805 struct kgsl_genlock_event_priv *event;
1806 struct kgsl_timestamp_event_genlock priv;
1807 int ret;
1808
1809 if (len != sizeof(priv))
1810 return -EINVAL;
1811
1812 if (copy_from_user(&priv, data, sizeof(priv)))
1813 return -EFAULT;
1814
1815 event = kzalloc(sizeof(*event), GFP_KERNEL);
1816
1817 if (event == NULL)
1818 return -ENOMEM;
1819
1820 event->handle = genlock_get_handle_fd(priv.handle);
1821
1822 if (IS_ERR(event->handle)) {
1823 int ret = PTR_ERR(event->handle);
1824 kfree(event);
1825 return ret;
1826 }
1827
Jeremy Gebbenfd87f9a2012-02-10 07:06:09 -07001828 ret = kgsl_add_event(device, timestamp, kgsl_genlock_event_cb, event,
1829 owner);
Jordan Croused4bc9d22011-11-17 13:39:21 -07001830 if (ret)
1831 kfree(event);
1832
1833 return ret;
1834}
1835#else
1836static long kgsl_add_genlock_event(struct kgsl_device *device,
Jeremy Gebbenfd87f9a2012-02-10 07:06:09 -07001837 u32 timestamp, void __user *data, int len,
1838 struct kgsl_device_private *owner)
Jordan Croused4bc9d22011-11-17 13:39:21 -07001839{
1840 return -EINVAL;
1841}
1842#endif
1843
1844/**
1845 * kgsl_ioctl_timestamp_event - Register a new timestamp event from userspace
1846 * @dev_priv - pointer to the private device structure
1847 * @cmd - the ioctl cmd passed from kgsl_ioctl
1848 * @data - the user data buffer from kgsl_ioctl
1849 * @returns 0 on success or error code on failure
1850 */
1851
1852static long kgsl_ioctl_timestamp_event(struct kgsl_device_private *dev_priv,
1853 unsigned int cmd, void *data)
1854{
1855 struct kgsl_timestamp_event *param = data;
1856 int ret;
1857
1858 switch (param->type) {
1859 case KGSL_TIMESTAMP_EVENT_GENLOCK:
1860 ret = kgsl_add_genlock_event(dev_priv->device,
Jeremy Gebbenfd87f9a2012-02-10 07:06:09 -07001861 param->timestamp, param->priv, param->len,
1862 dev_priv);
Jordan Croused4bc9d22011-11-17 13:39:21 -07001863 break;
1864 default:
1865 ret = -EINVAL;
1866 }
1867
1868 return ret;
1869}
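/*
 * Example (illustrative sketch only): how a userspace client might arm a
 * genlock release on a timestamp through IOCTL_KGSL_TIMESTAMP_EVENT.
 * "kgsl_fd", "genlock_fd" and "ts" are assumed to already exist; the field
 * names follow the dereferences in the handlers above.
 *
 *	struct kgsl_timestamp_event_genlock glock = { .handle = genlock_fd };
 *	struct kgsl_timestamp_event event = {
 *		.type = KGSL_TIMESTAMP_EVENT_GENLOCK,
 *		.timestamp = ts,
 *		.priv = &glock,
 *		.len = sizeof(glock),
 *	};
 *
 *	ioctl(kgsl_fd, IOCTL_KGSL_TIMESTAMP_EVENT, &event);
 */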
1870
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001871typedef long (*kgsl_ioctl_func_t)(struct kgsl_device_private *,
1872 unsigned int, void *);
1873
1874#define KGSL_IOCTL_FUNC(_cmd, _func, _lock) \
1875 [_IOC_NR(_cmd)] = { .cmd = _cmd, .func = _func, .lock = _lock }
1876
1877static const struct {
1878 unsigned int cmd;
1879 kgsl_ioctl_func_t func;
1880 int lock;
1881} kgsl_ioctl_funcs[] = {
1882 KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_GETPROPERTY,
1883 kgsl_ioctl_device_getproperty, 1),
1884 KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP,
1885 kgsl_ioctl_device_waittimestamp, 1),
1886 KGSL_IOCTL_FUNC(IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS,
1887 kgsl_ioctl_rb_issueibcmds, 1),
1888 KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP,
1889 kgsl_ioctl_cmdstream_readtimestamp, 1),
1890 KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP,
1891 kgsl_ioctl_cmdstream_freememontimestamp, 1),
1892 KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_CREATE,
1893 kgsl_ioctl_drawctxt_create, 1),
1894 KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_DESTROY,
1895 kgsl_ioctl_drawctxt_destroy, 1),
1896 KGSL_IOCTL_FUNC(IOCTL_KGSL_MAP_USER_MEM,
1897 kgsl_ioctl_map_user_mem, 0),
1898 KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FROM_PMEM,
1899 kgsl_ioctl_map_user_mem, 0),
1900 KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FREE,
1901 kgsl_ioctl_sharedmem_free, 0),
1902 KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC,
1903 kgsl_ioctl_sharedmem_from_vmalloc, 0),
1904 KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE,
1905 kgsl_ioctl_sharedmem_flush_cache, 0),
1906 KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC,
1907 kgsl_ioctl_gpumem_alloc, 0),
Jeremy Gebbena7423e42011-04-18 15:11:21 -06001908 KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_SYNCMEM,
1909 kgsl_ioctl_cff_syncmem, 0),
Sushmita Susheelendra41f8fa32011-05-11 17:15:58 -06001910 KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_USER_EVENT,
1911 kgsl_ioctl_cff_user_event, 0),
Jordan Croused4bc9d22011-11-17 13:39:21 -07001912 KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMESTAMP_EVENT,
Lucille Sylvester9329cf02011-12-02 14:30:41 -07001913 kgsl_ioctl_timestamp_event, 1),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001914};
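/*
 * kgsl_ioctl() below dispatches through this table, indexed by
 * _IOC_NR(cmd). Handlers registered with .lock = 1 run with the device
 * mutex held and the device brought out of suspend; .lock = 0 handlers
 * manage their own locking. Argument payloads smaller than 64 bytes are
 * staged on the stack, larger ones in a temporary kzalloc() buffer, and
 * are copied to/from userspace around the handler call.
 */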
1915
1916static long kgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
1917{
1918 struct kgsl_device_private *dev_priv = filep->private_data;
1919 unsigned int nr = _IOC_NR(cmd);
1920 kgsl_ioctl_func_t func;
1921 int lock, ret;
1922 char ustack[64];
1923 void *uptr = NULL;
1924
1925 BUG_ON(dev_priv == NULL);
1926
 1927 /* Workaround for a previously incorrectly defined ioctl code.
 1928 This helps ensure binary compatibility */
1929
1930 if (cmd == IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD)
1931 cmd = IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP;
Jason Varbedian80ba33d2011-07-11 17:29:05 -07001932 else if (cmd == IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD)
1933 cmd = IOCTL_KGSL_CMDSTREAM_READTIMESTAMP;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001934
1935 if (cmd & (IOC_IN | IOC_OUT)) {
1936 if (_IOC_SIZE(cmd) < sizeof(ustack))
1937 uptr = ustack;
1938 else {
1939 uptr = kzalloc(_IOC_SIZE(cmd), GFP_KERNEL);
1940 if (uptr == NULL) {
1941 KGSL_MEM_ERR(dev_priv->device,
1942 "kzalloc(%d) failed\n", _IOC_SIZE(cmd));
1943 ret = -ENOMEM;
1944 goto done;
1945 }
1946 }
1947
1948 if (cmd & IOC_IN) {
1949 if (copy_from_user(uptr, (void __user *) arg,
1950 _IOC_SIZE(cmd))) {
1951 ret = -EFAULT;
1952 goto done;
1953 }
1954 } else
1955 memset(uptr, 0, _IOC_SIZE(cmd));
1956 }
1957
1958 if (nr < ARRAY_SIZE(kgsl_ioctl_funcs) &&
1959 kgsl_ioctl_funcs[nr].func != NULL) {
1960 func = kgsl_ioctl_funcs[nr].func;
1961 lock = kgsl_ioctl_funcs[nr].lock;
1962 } else {
1963 func = dev_priv->device->ftbl->ioctl;
1964 if (!func) {
1965 KGSL_DRV_INFO(dev_priv->device,
1966 "invalid ioctl code %08x\n", cmd);
Jeremy Gebbenc15b4612012-01-09 09:44:11 -07001967 ret = -ENOIOCTLCMD;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001968 goto done;
1969 }
1970 lock = 1;
1971 }
1972
1973 if (lock) {
1974 mutex_lock(&dev_priv->device->mutex);
1975 kgsl_check_suspended(dev_priv->device);
1976 }
1977
1978 ret = func(dev_priv, cmd, uptr);
1979
1980 if (lock) {
1981 kgsl_check_idle_locked(dev_priv->device);
1982 mutex_unlock(&dev_priv->device->mutex);
1983 }
1984
1985 if (ret == 0 && (cmd & IOC_OUT)) {
1986 if (copy_to_user((void __user *) arg, uptr, _IOC_SIZE(cmd)))
1987 ret = -EFAULT;
1988 }
1989
1990done:
1991 if (_IOC_SIZE(cmd) >= sizeof(ustack))
1992 kfree(uptr);
1993
1994 return ret;
1995}
1996
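/*
 * Userspace reaches the memstore mapping below by calling mmap() on the
 * kgsl device with an offset equal to the memstore physical address (see
 * the check in kgsl_mmap()); the mapping must be read-only and exactly
 * memstore-sized, and is always made uncached.
 */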
1997static int
1998kgsl_mmap_memstore(struct kgsl_device *device, struct vm_area_struct *vma)
1999{
2000 struct kgsl_memdesc *memdesc = &device->memstore;
2001 int result;
2002 unsigned int vma_size = vma->vm_end - vma->vm_start;
2003
2004 /* The memstore can only be mapped as read only */
2005
2006 if (vma->vm_flags & VM_WRITE)
2007 return -EPERM;
2008
2009 if (memdesc->size != vma_size) {
2010 KGSL_MEM_ERR(device, "memstore bad size: %d should be %d\n",
2011 vma_size, memdesc->size);
2012 return -EINVAL;
2013 }
2014
2015 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2016
2017 result = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
2018 vma_size, vma->vm_page_prot);
2019 if (result != 0)
2020 KGSL_MEM_ERR(device, "remap_pfn_range failed: %d\n",
2021 result);
2022
2023 return result;
2024}
2025
Jordan Crouse4283e172011-09-26 14:45:47 -06002026/*
2027 * kgsl_gpumem_vm_open is called whenever a vma region is copied or split.
2028 * Increase the refcount to make sure that the accounting stays correct
2029 */
2030
2031static void kgsl_gpumem_vm_open(struct vm_area_struct *vma)
2032{
2033 struct kgsl_mem_entry *entry = vma->vm_private_data;
2034 kgsl_mem_entry_get(entry);
2035}
2036
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002037static int
2038kgsl_gpumem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2039{
2040 struct kgsl_mem_entry *entry = vma->vm_private_data;
2041
Jordan Croused17e9aa2011-10-12 16:57:48 -06002042 if (!entry->memdesc.ops || !entry->memdesc.ops->vmfault)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002043 return VM_FAULT_SIGBUS;
2044
2045 return entry->memdesc.ops->vmfault(&entry->memdesc, vma, vmf);
2046}
2047
2048static void
2049kgsl_gpumem_vm_close(struct vm_area_struct *vma)
2050{
2051 struct kgsl_mem_entry *entry = vma->vm_private_data;
2052 kgsl_mem_entry_put(entry);
2053}
2054
2055static struct vm_operations_struct kgsl_gpumem_vm_ops = {
Jordan Crouse4283e172011-09-26 14:45:47 -06002056 .open = kgsl_gpumem_vm_open,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002057 .fault = kgsl_gpumem_vm_fault,
2058 .close = kgsl_gpumem_vm_close,
2059};
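/*
 * Lifetime of a mapped entry: kgsl_mmap() below takes a reference when the
 * mapping is created, kgsl_gpumem_vm_open() takes another whenever the vma
 * is copied or split, and kgsl_gpumem_vm_close() drops one each time a vma
 * is torn down, so the entry outlives every live mapping.
 */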
2060
2061static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
2062{
2063 unsigned long vma_offset = vma->vm_pgoff << PAGE_SHIFT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002064 struct kgsl_device_private *dev_priv = file->private_data;
2065 struct kgsl_process_private *private = dev_priv->process_priv;
Jordan Crouse976cf0e2011-09-12 10:41:49 -06002066 struct kgsl_mem_entry *tmp, *entry = NULL;
Jordan Crouse2db0af92011-08-08 16:05:09 -06002067 struct kgsl_device *device = dev_priv->device;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002068
 2069 /* Handle legacy behavior for memstore */
2070
2071 if (vma_offset == device->memstore.physaddr)
2072 return kgsl_mmap_memstore(device, vma);
2073
2074 /* Find a chunk of GPU memory */
2075
2076 spin_lock(&private->mem_lock);
Jordan Crouse976cf0e2011-09-12 10:41:49 -06002077 list_for_each_entry(tmp, &private->mem_list, list) {
2078 if (vma_offset == tmp->memdesc.gpuaddr) {
2079 kgsl_mem_entry_get(tmp);
2080 entry = tmp;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002081 break;
2082 }
2083 }
2084 spin_unlock(&private->mem_lock);
2085
2086 if (entry == NULL)
2087 return -EINVAL;
2088
Jordan Croused17e9aa2011-10-12 16:57:48 -06002089 if (!entry->memdesc.ops ||
 2090 !entry->memdesc.ops->vmflags ||
 2091 !entry->memdesc.ops->vmfault) {
 /* drop the reference taken while searching mem_list above */
 kgsl_mem_entry_put(entry);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002092 return -EINVAL;
 }
2093
2094 vma->vm_flags |= entry->memdesc.ops->vmflags(&entry->memdesc);
2095
2096 vma->vm_private_data = entry;
2097 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
2098 vma->vm_ops = &kgsl_gpumem_vm_ops;
2099 vma->vm_file = file;
2100
2101 return 0;
2102}
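/*
 * Example (illustrative sketch only): mapping a GPU buffer from userspace,
 * assuming "fd" is the open kgsl file descriptor and "alloc" was filled in
 * by IOCTL_KGSL_GPUMEM_ALLOC. The GPU address doubles as the mmap offset,
 * which is matched against memdesc.gpuaddr above.
 *
 *	void *ptr = mmap(NULL, alloc.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, alloc.gpuaddr);
 *	if (ptr == MAP_FAILED)
 *		perror("mmap");
 */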
2103
2104static const struct file_operations kgsl_fops = {
2105 .owner = THIS_MODULE,
2106 .release = kgsl_release,
2107 .open = kgsl_open,
2108 .mmap = kgsl_mmap,
2109 .unlocked_ioctl = kgsl_ioctl,
2110};
2111
2112struct kgsl_driver kgsl_driver = {
2113 .process_mutex = __MUTEX_INITIALIZER(kgsl_driver.process_mutex),
2114 .ptlock = __SPIN_LOCK_UNLOCKED(kgsl_driver.ptlock),
2115 .devlock = __MUTEX_INITIALIZER(kgsl_driver.devlock),
2116};
2117EXPORT_SYMBOL(kgsl_driver);
2118
2119void kgsl_unregister_device(struct kgsl_device *device)
2120{
2121 int minor;
2122
2123 mutex_lock(&kgsl_driver.devlock);
2124 for (minor = 0; minor < KGSL_DEVICE_MAX; minor++) {
2125 if (device == kgsl_driver.devp[minor])
2126 break;
2127 }
2128
2129 mutex_unlock(&kgsl_driver.devlock);
2130
2131 if (minor == KGSL_DEVICE_MAX)
2132 return;
2133
Jordan Crouse156cfbc2012-01-24 09:32:04 -07002134 kgsl_device_snapshot_close(device);
2135
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002136 kgsl_cffdump_close(device->id);
2137 kgsl_pwrctrl_uninit_sysfs(device);
2138
Lucille Sylvesteref44e7332011-11-02 13:21:17 -07002139 if (cpu_is_msm8x60())
2140 wake_lock_destroy(&device->idle_wakelock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002141
2142 idr_destroy(&device->context_idr);
2143
2144 if (device->memstore.hostptr)
2145 kgsl_sharedmem_free(&device->memstore);
2146
2147 kgsl_mmu_close(device);
2148
2149 if (device->work_queue) {
2150 destroy_workqueue(device->work_queue);
2151 device->work_queue = NULL;
2152 }
2153
2154 device_destroy(kgsl_driver.class,
2155 MKDEV(MAJOR(kgsl_driver.major), minor));
2156
2157 mutex_lock(&kgsl_driver.devlock);
2158 kgsl_driver.devp[minor] = NULL;
2159 mutex_unlock(&kgsl_driver.devlock);
2160}
2161EXPORT_SYMBOL(kgsl_unregister_device);
2162
2163int
2164kgsl_register_device(struct kgsl_device *device)
2165{
2166 int minor, ret;
2167 dev_t dev;
2168
2169 /* Find a minor for the device */
2170
2171 mutex_lock(&kgsl_driver.devlock);
2172 for (minor = 0; minor < KGSL_DEVICE_MAX; minor++) {
2173 if (kgsl_driver.devp[minor] == NULL) {
2174 kgsl_driver.devp[minor] = device;
2175 break;
2176 }
2177 }
2178
2179 mutex_unlock(&kgsl_driver.devlock);
2180
2181 if (minor == KGSL_DEVICE_MAX) {
2182 KGSL_CORE_ERR("minor devices exhausted\n");
2183 return -ENODEV;
2184 }
2185
2186 /* Create the device */
2187 dev = MKDEV(MAJOR(kgsl_driver.major), minor);
2188 device->dev = device_create(kgsl_driver.class,
2189 device->parentdev,
2190 dev, device,
2191 device->name);
2192
2193 if (IS_ERR(device->dev)) {
2194 ret = PTR_ERR(device->dev);
2195 KGSL_CORE_ERR("device_create(%s): %d\n", device->name, ret);
2196 goto err_devlist;
2197 }
2198
2199 dev_set_drvdata(device->parentdev, device);
2200
2201 /* Generic device initialization */
2202 init_waitqueue_head(&device->wait_queue);
2203
2204 kgsl_cffdump_open(device->id);
2205
2206 init_completion(&device->hwaccess_gate);
2207 init_completion(&device->suspend_gate);
2208
2209 ATOMIC_INIT_NOTIFIER_HEAD(&device->ts_notifier_list);
2210
2211 setup_timer(&device->idle_timer, kgsl_timer, (unsigned long) device);
2212 ret = kgsl_create_device_workqueue(device);
2213 if (ret)
2214 goto err_devlist;
2215
2216 INIT_WORK(&device->idle_check_ws, kgsl_idle_check);
Jordan Crouse1bf80aa2011-10-12 16:57:47 -06002217 INIT_WORK(&device->ts_expired_ws, kgsl_timestamp_expired);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002218
Jordan Croused4bc9d22011-11-17 13:39:21 -07002219 INIT_LIST_HEAD(&device->events);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002220
2221 ret = kgsl_mmu_init(device);
2222 if (ret != 0)
2223 goto err_dest_work_q;
2224
2225 ret = kgsl_allocate_contiguous(&device->memstore,
2226 sizeof(struct kgsl_devmemstore));
2227
2228 if (ret != 0)
2229 goto err_close_mmu;
2230
Lucille Sylvesteref44e7332011-11-02 13:21:17 -07002231 if (cpu_is_msm8x60())
2232 wake_lock_init(&device->idle_wakelock,
2233 WAKE_LOCK_IDLE, device->name);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002234
2235 idr_init(&device->context_idr);
2236
Jordan Crouse156cfbc2012-01-24 09:32:04 -07002237 /* Initialize the snapshot engine */
2238 kgsl_device_snapshot_init(device);
2239
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002240 /* sysfs and debugfs initialization - failure here is non-fatal */
2241
2242 /* Initialize logging */
2243 kgsl_device_debugfs_init(device);
2244
2245 /* Initialize common sysfs entries */
2246 kgsl_pwrctrl_init_sysfs(device);
2247
2248 return 0;
2249
2250err_close_mmu:
2251 kgsl_mmu_close(device);
2252err_dest_work_q:
2253 destroy_workqueue(device->work_queue);
2254 device->work_queue = NULL;
2255err_devlist:
2256 mutex_lock(&kgsl_driver.devlock);
2257 kgsl_driver.devp[minor] = NULL;
2258 mutex_unlock(&kgsl_driver.devlock);
2259
2260 return ret;
2261}
2262EXPORT_SYMBOL(kgsl_register_device);
2263
2264int kgsl_device_platform_probe(struct kgsl_device *device,
2265 irqreturn_t (*dev_isr) (int, void*))
2266{
Michael Street8bacdd02012-01-05 14:55:01 -08002267 int result;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002268 int status = -EINVAL;
2269 struct kgsl_memregion *regspace = NULL;
2270 struct resource *res;
2271 struct platform_device *pdev =
2272 container_of(device->parentdev, struct platform_device, dev);
2273
2274 pm_runtime_enable(device->parentdev);
2275
2276 status = kgsl_pwrctrl_init(device);
2277 if (status)
2278 goto error;
2279
2280 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2281 device->iomemname);
2282 if (res == NULL) {
2283 KGSL_DRV_ERR(device, "platform_get_resource_byname failed\n");
2284 status = -EINVAL;
2285 goto error_pwrctrl_close;
2286 }
2287 if (res->start == 0 || resource_size(res) == 0) {
2288 KGSL_DRV_ERR(device, "dev %d invalid regspace\n", device->id);
2289 status = -EINVAL;
2290 goto error_pwrctrl_close;
2291 }
2292
2293 regspace = &device->regspace;
2294 regspace->mmio_phys_base = res->start;
2295 regspace->sizebytes = resource_size(res);
2296
2297 if (!request_mem_region(regspace->mmio_phys_base,
2298 regspace->sizebytes, device->name)) {
2299 KGSL_DRV_ERR(device, "request_mem_region failed\n");
2300 status = -ENODEV;
2301 goto error_pwrctrl_close;
2302 }
2303
2304 regspace->mmio_virt_base = ioremap(regspace->mmio_phys_base,
2305 regspace->sizebytes);
2306
2307 if (regspace->mmio_virt_base == NULL) {
2308 KGSL_DRV_ERR(device, "ioremap failed\n");
2309 status = -ENODEV;
2310 goto error_release_mem;
2311 }
2312
2313 status = request_irq(device->pwrctrl.interrupt_num, dev_isr,
2314 IRQF_TRIGGER_HIGH, device->name, device);
2315 if (status) {
2316 KGSL_DRV_ERR(device, "request_irq(%d) failed: %d\n",
2317 device->pwrctrl.interrupt_num, status);
2318 goto error_iounmap;
2319 }
2320 device->pwrctrl.have_irq = 1;
2321 disable_irq(device->pwrctrl.interrupt_num);
2322
2323 KGSL_DRV_INFO(device,
2324 "dev_id %d regs phys 0x%08x size 0x%08x virt %p\n",
2325 device->id, regspace->mmio_phys_base,
2326 regspace->sizebytes, regspace->mmio_virt_base);
2327
Michael Street8bacdd02012-01-05 14:55:01 -08002328 result = kgsl_drm_init(pdev);
2329 if (result)
2330 goto error_iounmap;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002331
2332 status = kgsl_register_device(device);
2333 if (!status)
2334 return status;
2335
 2336 free_irq(device->pwrctrl.interrupt_num, device);
2337 device->pwrctrl.have_irq = 0;
2338error_iounmap:
2339 iounmap(regspace->mmio_virt_base);
2340 regspace->mmio_virt_base = NULL;
2341error_release_mem:
2342 release_mem_region(regspace->mmio_phys_base, regspace->sizebytes);
2343error_pwrctrl_close:
2344 kgsl_pwrctrl_close(device);
2345error:
2346 return status;
2347}
2348EXPORT_SYMBOL(kgsl_device_platform_probe);
2349
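/*
 * kgsl_device_platform_remove() below unwinds the probe sequence above: it
 * unregisters the device, unmaps and releases the register space, shuts
 * down power control and disables runtime PM.
 */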
2350void kgsl_device_platform_remove(struct kgsl_device *device)
2351{
2352 struct kgsl_memregion *regspace = &device->regspace;
2353
2354 kgsl_unregister_device(device);
2355
2356 if (regspace->mmio_virt_base != NULL) {
2357 iounmap(regspace->mmio_virt_base);
2358 regspace->mmio_virt_base = NULL;
2359 release_mem_region(regspace->mmio_phys_base,
2360 regspace->sizebytes);
2361 }
2362 kgsl_pwrctrl_close(device);
2363
2364 pm_runtime_disable(device->parentdev);
2365}
2366EXPORT_SYMBOL(kgsl_device_platform_remove);
2367
2368static int __devinit
2369kgsl_ptdata_init(void)
2370{
Shubhraprakash Das767fdda2011-08-15 15:49:45 -06002371 kgsl_driver.ptpool = kgsl_mmu_ptpool_init(KGSL_PAGETABLE_SIZE,
2372 kgsl_pagetable_count);
2373 if (!kgsl_driver.ptpool)
2374 return -ENOMEM;
2375 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002376}
2377
2378static void kgsl_core_exit(void)
2379{
2380 unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX);
2381
Shubhraprakash Das767fdda2011-08-15 15:49:45 -06002382 kgsl_mmu_ptpool_destroy(&kgsl_driver.ptpool);
2383 kgsl_driver.ptpool = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002384
2385 device_unregister(&kgsl_driver.virtdev);
2386
2387 if (kgsl_driver.class) {
2388 class_destroy(kgsl_driver.class);
2389 kgsl_driver.class = NULL;
2390 }
2391
2392 kgsl_drm_exit();
2393 kgsl_cffdump_destroy();
Jordan Croused8f1c6b2011-10-04 09:31:29 -06002394 kgsl_core_debugfs_close();
2395 kgsl_sharedmem_uninit_sysfs();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002396}
2397
2398static int __init kgsl_core_init(void)
2399{
2400 int result = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002401 /* alloc major and minor device numbers */
2402 result = alloc_chrdev_region(&kgsl_driver.major, 0, KGSL_DEVICE_MAX,
2403 KGSL_NAME);
2404 if (result < 0) {
2405 KGSL_CORE_ERR("alloc_chrdev_region failed err = %d\n", result);
2406 goto err;
2407 }
2408
2409 cdev_init(&kgsl_driver.cdev, &kgsl_fops);
2410 kgsl_driver.cdev.owner = THIS_MODULE;
2411 kgsl_driver.cdev.ops = &kgsl_fops;
2412 result = cdev_add(&kgsl_driver.cdev, MKDEV(MAJOR(kgsl_driver.major), 0),
2413 KGSL_DEVICE_MAX);
2414
2415 if (result) {
2416 KGSL_CORE_ERR("kgsl: cdev_add() failed, dev_num= %d,"
2417 " result= %d\n", kgsl_driver.major, result);
2418 goto err;
2419 }
2420
2421 kgsl_driver.class = class_create(THIS_MODULE, KGSL_NAME);
2422
2423 if (IS_ERR(kgsl_driver.class)) {
2424 result = PTR_ERR(kgsl_driver.class);
2425 KGSL_CORE_ERR("failed to create class %s", KGSL_NAME);
2426 goto err;
2427 }
2428
2429 /* Make a virtual device for managing core related things
2430 in sysfs */
2431 kgsl_driver.virtdev.class = kgsl_driver.class;
2432 dev_set_name(&kgsl_driver.virtdev, "kgsl");
2433 result = device_register(&kgsl_driver.virtdev);
2434 if (result) {
 2435 KGSL_CORE_ERR("device_register failed\n");
2436 goto err;
2437 }
2438
2439 /* Make kobjects in the virtual device for storing statistics */
2440
2441 kgsl_driver.ptkobj =
2442 kobject_create_and_add("pagetables",
2443 &kgsl_driver.virtdev.kobj);
2444
2445 kgsl_driver.prockobj =
2446 kobject_create_and_add("proc",
2447 &kgsl_driver.virtdev.kobj);
2448
2449 kgsl_core_debugfs_init();
2450
2451 kgsl_sharedmem_init_sysfs();
2452 kgsl_cffdump_init();
2453
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002454 INIT_LIST_HEAD(&kgsl_driver.process_list);
2455
Shubhraprakash Das767fdda2011-08-15 15:49:45 -06002456 INIT_LIST_HEAD(&kgsl_driver.pagetable_list);
2457
2458 kgsl_mmu_set_mmutype(ksgl_mmu_type);
2459
2460 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_get_mmutype()) {
2461 result = kgsl_ptdata_init();
2462 if (result)
2463 goto err;
2464 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002465
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002466 return 0;
2467
2468err:
2469 kgsl_core_exit();
2470 return result;
2471}
2472
2473module_init(kgsl_core_init);
2474module_exit(kgsl_core_exit);
2475
2476MODULE_AUTHOR("Qualcomm Innovation Center, Inc.");
2477MODULE_DESCRIPTION("MSM GPU driver");
2478MODULE_LICENSE("GPL");