blob: 9333dca4cf7d8e134111666927c52f7ada658780 [file] [log] [blame]
Tarun Karraf8e5cd22012-01-09 14:10:09 -07001/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/fb.h>
14#include <linux/file.h>
15#include <linux/fs.h>
16#include <linux/debugfs.h>
17#include <linux/uaccess.h>
18#include <linux/interrupt.h>
19#include <linux/workqueue.h>
20#include <linux/android_pmem.h>
21#include <linux/vmalloc.h>
22#include <linux/pm_runtime.h>
Jordan Croused4bc9d22011-11-17 13:39:21 -070023#include <linux/genlock.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070024
25#include <linux/ashmem.h>
26#include <linux/major.h>
Jordan Crouse8eab35a2011-10-12 16:57:48 -060027#include <linux/ion.h>
Lucille Sylvesteref44e7332011-11-02 13:21:17 -070028#include <mach/socinfo.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029
30#include "kgsl.h"
31#include "kgsl_debugfs.h"
32#include "kgsl_cffdump.h"
33#include "kgsl_log.h"
34#include "kgsl_sharedmem.h"
35#include "kgsl_device.h"
Norman Geed7402ff2011-10-28 08:51:11 -060036#include "kgsl_trace.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070037
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "kgsl."

/* Minimum number of pagetables preallocated at init ("kgsl.ptcount" param) */
static int kgsl_pagetable_count = KGSL_PAGETABLE_COUNT;
/* MMU backend selection ("kgsl.mmutype" param).
 * NOTE(review): "ksgl" looks like a typo for "kgsl"; the name is internal but
 * likely referenced later in this file, so renaming needs a file-wide pass. */
static char *ksgl_mmu_type;
module_param_named(ptcount, kgsl_pagetable_count, int, 0);
MODULE_PARM_DESC(kgsl_pagetable_count,
"Minimum number of pagetables for KGSL to allocate at initialization time");
module_param_named(mmutype, ksgl_mmu_type, charp, 0);
MODULE_PARM_DESC(ksgl_mmu_type,
"Type of MMU to be used for graphics. Valid values are 'iommu' or 'gpummu' or 'nommu'");

/* Shared ion client handle used for ION-backed mem entries below */
static struct ion_client *kgsl_ion_client;
51
Jordan Croused4bc9d22011-11-17 13:39:21 -070052/**
53 * kgsl_add_event - Add a new timstamp event for the KGSL device
54 * @device - KGSL device for the new event
55 * @ts - the timestamp to trigger the event on
56 * @cb - callback function to call when the timestamp expires
57 * @priv - private data for the specific event type
Jeremy Gebbenfd87f9a2012-02-10 07:06:09 -070058 * @owner - driver instance that owns this event
Jordan Croused4bc9d22011-11-17 13:39:21 -070059 *
60 * @returns - 0 on success or error code on failure
61 */
62
63static int kgsl_add_event(struct kgsl_device *device, u32 ts,
Jeremy Gebbenfd87f9a2012-02-10 07:06:09 -070064 void (*cb)(struct kgsl_device *, void *, u32), void *priv,
65 struct kgsl_device_private *owner)
Jordan Croused4bc9d22011-11-17 13:39:21 -070066{
67 struct kgsl_event *event;
68 struct list_head *n;
69 unsigned int cur = device->ftbl->readtimestamp(device,
70 KGSL_TIMESTAMP_RETIRED);
71
72 if (cb == NULL)
73 return -EINVAL;
74
75 /* Check to see if the requested timestamp has already fired */
76
77 if (timestamp_cmp(cur, ts) >= 0) {
78 cb(device, priv, cur);
79 return 0;
80 }
81
82 event = kzalloc(sizeof(*event), GFP_KERNEL);
83 if (event == NULL)
84 return -ENOMEM;
85
86 event->timestamp = ts;
87 event->priv = priv;
88 event->func = cb;
Jeremy Gebbenfd87f9a2012-02-10 07:06:09 -070089 event->owner = owner;
Jordan Croused4bc9d22011-11-17 13:39:21 -070090
91 /* Add the event in order to the list */
92
93 for (n = device->events.next ; n != &device->events; n = n->next) {
94 struct kgsl_event *e =
95 list_entry(n, struct kgsl_event, list);
96
97 if (timestamp_cmp(e->timestamp, ts) > 0) {
98 list_add(&event->list, n->prev);
99 break;
100 }
101 }
102
103 if (n == &device->events)
104 list_add_tail(&event->list, &device->events);
105
Jeremy Gebben63904832012-02-07 16:10:55 -0700106 queue_work(device->work_queue, &device->ts_expired_ws);
Jordan Croused4bc9d22011-11-17 13:39:21 -0700107 return 0;
108}
Jordan Croused4bc9d22011-11-17 13:39:21 -0700109
Jeremy Gebbenfd87f9a2012-02-10 07:06:09 -0700110/**
111 * kgsl_cancel_events - Cancel all events for a process
112 * @device - KGSL device for the events to cancel
113 * @owner - driver instance that owns the events to cancel
114 *
115 */
116static void kgsl_cancel_events(struct kgsl_device *device,
117 struct kgsl_device_private *owner)
118{
119 struct kgsl_event *event, *event_tmp;
120 unsigned int cur = device->ftbl->readtimestamp(device,
121 KGSL_TIMESTAMP_RETIRED);
122
123 list_for_each_entry_safe(event, event_tmp, &device->events, list) {
124 if (event->owner != owner)
125 continue;
126 /*
127 * "cancel" the events by calling their callback.
128 * Currently, events are used for lock and memory
129 * management, so if the process is dying the right
130 * thing to do is release or free.
131 */
132 if (event->func)
133 event->func(device, event->priv, cur);
134
135 list_del(&event->list);
136 kfree(event);
137 }
138}
139
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700140static inline struct kgsl_mem_entry *
141kgsl_mem_entry_create(void)
142{
143 struct kgsl_mem_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
144
145 if (!entry)
146 KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*entry));
147 else
148 kref_init(&entry->refcount);
149
150 return entry;
151}
152
/*
 * kref release callback for a kgsl_mem_entry: roll back the owning
 * process's usage statistics, release the backing memory, and drop
 * any per-allocator resources (file refs for PMEM/ASHMEM, ion handle
 * for ION).
 */
void
kgsl_mem_entry_destroy(struct kref *kref)
{
	struct kgsl_mem_entry *entry = container_of(kref,
						    struct kgsl_mem_entry,
						    refcount);

	/* Undo this entry's contribution to the per-process stats */
	entry->priv->stats[entry->memtype].cur -= entry->memdesc.size;

	/* KERNEL allocations are not counted in the global mapped total */
	if (entry->memtype != KGSL_MEM_ENTRY_KERNEL)
		kgsl_driver.stats.mapped -= entry->memdesc.size;

	/*
	 * Ion takes care of freeing the sglist for us (how nice </sarcasm>) so
	 * unmap the dma before freeing the sharedmem so kgsl_sharedmem_free
	 * doesn't try to free it again
	 */

	if (entry->memtype == KGSL_MEM_ENTRY_ION) {
		ion_unmap_dma(kgsl_ion_client, entry->priv_data);
		entry->memdesc.sg = NULL;
	}

	kgsl_sharedmem_free(&entry->memdesc);

	/* Per-type teardown; other types need no extra cleanup */
	switch (entry->memtype) {
	case KGSL_MEM_ENTRY_PMEM:
	case KGSL_MEM_ENTRY_ASHMEM:
		if (entry->priv_data)
			fput(entry->priv_data);
		break;
	case KGSL_MEM_ENTRY_ION:
		ion_free(kgsl_ion_client, entry->priv_data);
		break;
	}

	kfree(entry);
}
EXPORT_SYMBOL(kgsl_mem_entry_destroy);
192
193static
194void kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry,
195 struct kgsl_process_private *process)
196{
197 spin_lock(&process->mem_lock);
198 list_add(&entry->list, &process->mem_list);
199 spin_unlock(&process->mem_lock);
200
201 entry->priv = process;
202}
203
204/* Allocate a new context id */
205
206static struct kgsl_context *
207kgsl_create_context(struct kgsl_device_private *dev_priv)
208{
209 struct kgsl_context *context;
210 int ret, id;
211
212 context = kzalloc(sizeof(*context), GFP_KERNEL);
213
214 if (context == NULL)
215 return NULL;
216
217 while (1) {
218 if (idr_pre_get(&dev_priv->device->context_idr,
219 GFP_KERNEL) == 0) {
220 kfree(context);
221 return NULL;
222 }
223
224 ret = idr_get_new(&dev_priv->device->context_idr,
225 context, &id);
226
227 if (ret != -EAGAIN)
228 break;
229 }
230
231 if (ret) {
232 kfree(context);
233 return NULL;
234 }
235
236 context->id = id;
237 context->dev_priv = dev_priv;
238
239 return context;
240}
241
242static void
243kgsl_destroy_context(struct kgsl_device_private *dev_priv,
244 struct kgsl_context *context)
245{
246 int id;
247
248 if (context == NULL)
249 return;
250
251 /* Fire a bug if the devctxt hasn't been freed */
252 BUG_ON(context->devctxt);
253
254 id = context->id;
255 kfree(context);
256
257 idr_remove(&dev_priv->device->context_idr, id);
258}
259
Jordan Crouse1bf80aa2011-10-12 16:57:47 -0600260static void kgsl_timestamp_expired(struct work_struct *work)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700261{
Jordan Crouse1bf80aa2011-10-12 16:57:47 -0600262 struct kgsl_device *device = container_of(work, struct kgsl_device,
263 ts_expired_ws);
Jordan Croused4bc9d22011-11-17 13:39:21 -0700264 struct kgsl_event *event, *event_tmp;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700265 uint32_t ts_processed;
266
Jordan Crouse1bf80aa2011-10-12 16:57:47 -0600267 mutex_lock(&device->mutex);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700268
269 /* get current EOP timestamp */
270 ts_processed = device->ftbl->readtimestamp(device,
271 KGSL_TIMESTAMP_RETIRED);
272
Jordan Croused4bc9d22011-11-17 13:39:21 -0700273 /* Process expired events */
274 list_for_each_entry_safe(event, event_tmp, &device->events, list) {
275 if (timestamp_cmp(ts_processed, event->timestamp) < 0)
276 break;
277
278 if (event->func)
279 event->func(device, event->priv, ts_processed);
280
281 list_del(&event->list);
282 kfree(event);
283 }
284
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700285 mutex_unlock(&device->mutex);
286}
287
288static void kgsl_check_idle_locked(struct kgsl_device *device)
289{
290 if (device->pwrctrl.nap_allowed == true &&
291 device->state == KGSL_STATE_ACTIVE &&
292 device->requested_state == KGSL_STATE_NONE) {
Jeremy Gebben388c2972011-12-16 09:05:07 -0700293 kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700294 if (kgsl_pwrctrl_sleep(device) != 0)
295 mod_timer(&device->idle_timer,
296 jiffies +
297 device->pwrctrl.interval_timeout);
298 }
299}
300
/* Locking wrapper around kgsl_check_idle_locked() */
static void kgsl_check_idle(struct kgsl_device *device)
{
	mutex_lock(&device->mutex);
	kgsl_check_idle_locked(device);
	mutex_unlock(&device->mutex);
}
307
308struct kgsl_device *kgsl_get_device(int dev_idx)
309{
310 int i;
311 struct kgsl_device *ret = NULL;
312
313 mutex_lock(&kgsl_driver.devlock);
314
315 for (i = 0; i < KGSL_DEVICE_MAX; i++) {
316 if (kgsl_driver.devp[i] && kgsl_driver.devp[i]->id == dev_idx) {
317 ret = kgsl_driver.devp[i];
318 break;
319 }
320 }
321
322 mutex_unlock(&kgsl_driver.devlock);
323 return ret;
324}
325EXPORT_SYMBOL(kgsl_get_device);
326
327static struct kgsl_device *kgsl_get_minor(int minor)
328{
329 struct kgsl_device *ret = NULL;
330
331 if (minor < 0 || minor >= KGSL_DEVICE_MAX)
332 return NULL;
333
334 mutex_lock(&kgsl_driver.devlock);
335 ret = kgsl_driver.devp[minor];
336 mutex_unlock(&kgsl_driver.devlock);
337
338 return ret;
339}
340
/* Register @nb on the device's timestamp notifier chain */
int kgsl_register_ts_notifier(struct kgsl_device *device,
			      struct notifier_block *nb)
{
	BUG_ON(device == NULL);
	return atomic_notifier_chain_register(&device->ts_notifier_list,
					      nb);
}
EXPORT_SYMBOL(kgsl_register_ts_notifier);
349
/* Remove @nb from the device's timestamp notifier chain */
int kgsl_unregister_ts_notifier(struct kgsl_device *device,
				struct notifier_block *nb)
{
	BUG_ON(device == NULL);
	return atomic_notifier_chain_unregister(&device->ts_notifier_list,
						nb);
}
EXPORT_SYMBOL(kgsl_unregister_ts_notifier);
358
359int kgsl_check_timestamp(struct kgsl_device *device, unsigned int timestamp)
360{
361 unsigned int ts_processed;
362
363 ts_processed = device->ftbl->readtimestamp(device,
364 KGSL_TIMESTAMP_RETIRED);
365
Jordan Crousee6239dd2011-11-17 13:39:21 -0700366 return (timestamp_cmp(ts_processed, timestamp) >= 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700367}
368EXPORT_SYMBOL(kgsl_check_timestamp);
369
/*
 * Suspend the device: quiesce outstanding work, stop the hardware, and
 * move the power state machine to KGSL_STATE_SUSPEND. Nap and the
 * pwrscale policy are disabled for the duration and restored on success.
 * Returns 0 on success or -EINVAL for a NULL device / unexpected state.
 */
static int kgsl_suspend_device(struct kgsl_device *device, pm_message_t state)
{
	int status = -EINVAL;
	unsigned int nap_allowed_saved;
	struct kgsl_pwrscale_policy *policy_saved;

	if (!device)
		return -EINVAL;

	KGSL_PWR_WARN(device, "suspend start\n");

	mutex_lock(&device->mutex);
	/* Park nap and pwrscale so they can't fight the transition */
	nap_allowed_saved = device->pwrctrl.nap_allowed;
	device->pwrctrl.nap_allowed = false;
	policy_saved = device->pwrscale.policy;
	device->pwrscale.policy = NULL;
	kgsl_pwrctrl_request_state(device, KGSL_STATE_SUSPEND);
	/* Make sure no user process is waiting for a timestamp *
	 * before suspending */
	if (device->active_cnt != 0) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->suspend_gate);
		mutex_lock(&device->mutex);
	}
	/* Don't let the timer wake us during suspended sleep. */
	del_timer_sync(&device->idle_timer);
	switch (device->state) {
	case KGSL_STATE_INIT:
		break;
	case KGSL_STATE_ACTIVE:
		/* Wait for the device to become idle */
		device->ftbl->idle(device, KGSL_TIMEOUT_DEFAULT);
		/* fallthrough: once idle, shut down as for NAP/SLEEP */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
		/* Get the completion ready to be waited upon. */
		INIT_COMPLETION(device->hwaccess_gate);
		device->ftbl->suspend_context(device);
		device->ftbl->stop(device);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SUSPEND);
		break;
	case KGSL_STATE_SLUMBER:
		/* Hardware already stopped; just gate access */
		INIT_COMPLETION(device->hwaccess_gate);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SUSPEND);
		break;
	default:
		KGSL_PWR_ERR(device, "suspend fail, device %d\n",
			device->id);
		goto end;
	}
	kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
	device->pwrctrl.nap_allowed = nap_allowed_saved;
	device->pwrscale.policy = policy_saved;
	status = 0;

end:
	mutex_unlock(&device->mutex);
	KGSL_PWR_WARN(device, "suspend end\n");
	return status;
}
429
/*
 * Resume from system suspend: move SUSPEND -> SLUMBER and open the
 * hwaccess gate so blocked clients can proceed. Returns 0 on success,
 * -EINVAL if the device is NULL or was not in SUSPEND.
 */
static int kgsl_resume_device(struct kgsl_device *device)
{
	int status = -EINVAL;

	if (!device)
		return -EINVAL;

	KGSL_PWR_WARN(device, "resume start\n");
	mutex_lock(&device->mutex);
	if (device->state == KGSL_STATE_SUSPEND) {
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
		status = 0;
		complete_all(&device->hwaccess_gate);
	}
	kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);

	mutex_unlock(&device->mutex);
	KGSL_PWR_WARN(device, "resume end\n");
	return status;
}
450
/* dev_pm_ops .suspend hook: forward to kgsl_suspend_device */
static int kgsl_suspend(struct device *dev)
{

	pm_message_t arg = {0};
	struct kgsl_device *device = dev_get_drvdata(dev);
	return kgsl_suspend_device(device, arg);
}
458
/* dev_pm_ops .resume hook: forward to kgsl_resume_device */
static int kgsl_resume(struct device *dev)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	return kgsl_resume_device(device);
}
464
/* Runtime PM suspend: nothing to do, KGSL manages its own power states */
static int kgsl_runtime_suspend(struct device *dev)
{
	return 0;
}
469
/* Runtime PM resume: nothing to do, KGSL manages its own power states */
static int kgsl_runtime_resume(struct device *dev)
{
	return 0;
}
474
/* Power-management callbacks exported for the platform driver */
const struct dev_pm_ops kgsl_pm_ops = {
	.suspend = kgsl_suspend,
	.resume = kgsl_resume,
	.runtime_suspend = kgsl_runtime_suspend,
	.runtime_resume = kgsl_runtime_resume,
};
EXPORT_SYMBOL(kgsl_pm_ops);
482
/* Early-suspend (display off) hook: request slumber and put the GPU to sleep */
void kgsl_early_suspend_driver(struct early_suspend *h)
{
	struct kgsl_device *device = container_of(h,
					struct kgsl_device, display_off);
	KGSL_PWR_WARN(device, "early suspend start\n");
	mutex_lock(&device->mutex);
	kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
	kgsl_pwrctrl_sleep(device);
	mutex_unlock(&device->mutex);
	KGSL_PWR_WARN(device, "early suspend end\n");
}
EXPORT_SYMBOL(kgsl_early_suspend_driver);
495
/* Platform-driver suspend entry point */
int kgsl_suspend_driver(struct platform_device *pdev,
			pm_message_t state)
{
	struct kgsl_device *device = dev_get_drvdata(&pdev->dev);
	return kgsl_suspend_device(device, state);
}
EXPORT_SYMBOL(kgsl_suspend_driver);
503
/* Platform-driver resume entry point */
int kgsl_resume_driver(struct platform_device *pdev)
{
	struct kgsl_device *device = dev_get_drvdata(&pdev->dev);
	return kgsl_resume_device(device);
}
EXPORT_SYMBOL(kgsl_resume_driver);
510
/*
 * Late-resume (display on) hook: wake the GPU, clear the slumber
 * restriction, and jump to the turbo power level for fast startup.
 */
void kgsl_late_resume_driver(struct early_suspend *h)
{
	struct kgsl_device *device = container_of(h,
					struct kgsl_device, display_off);
	KGSL_PWR_WARN(device, "late resume start\n");
	mutex_lock(&device->mutex);
	kgsl_pwrctrl_wake(device);
	device->pwrctrl.restore_slumber = 0;
	kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_TURBO);
	mutex_unlock(&device->mutex);
	/* May immediately nap again if the device is idle */
	kgsl_check_idle(device);
	KGSL_PWR_WARN(device, "late resume end\n");
}
EXPORT_SYMBOL(kgsl_late_resume_driver);
525
526/* file operations */
527static struct kgsl_process_private *
528kgsl_get_process_private(struct kgsl_device_private *cur_dev_priv)
529{
530 struct kgsl_process_private *private;
531
532 mutex_lock(&kgsl_driver.process_mutex);
533 list_for_each_entry(private, &kgsl_driver.process_list, list) {
534 if (private->pid == task_tgid_nr(current)) {
535 private->refcnt++;
536 goto out;
537 }
538 }
539
540 /* no existing process private found for this dev_priv, create one */
541 private = kzalloc(sizeof(struct kgsl_process_private), GFP_KERNEL);
542 if (private == NULL) {
543 KGSL_DRV_ERR(cur_dev_priv->device, "kzalloc(%d) failed\n",
544 sizeof(struct kgsl_process_private));
545 goto out;
546 }
547
548 spin_lock_init(&private->mem_lock);
549 private->refcnt = 1;
550 private->pid = task_tgid_nr(current);
551
552 INIT_LIST_HEAD(&private->mem_list);
553
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600554 if (kgsl_mmu_enabled())
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700555 {
556 unsigned long pt_name;
557
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700558 pt_name = task_tgid_nr(current);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700559 private->pagetable = kgsl_mmu_getpagetable(pt_name);
560 if (private->pagetable == NULL) {
561 kfree(private);
562 private = NULL;
563 goto out;
564 }
565 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700566
567 list_add(&private->list, &kgsl_driver.process_list);
568
569 kgsl_process_init_sysfs(private);
570
571out:
572 mutex_unlock(&kgsl_driver.process_mutex);
573 return private;
574}
575
/*
 * Drop a reference to the per-process private data; on the final put,
 * tear down sysfs, release every remaining memory entry, free the
 * pagetable, and free the structure itself.
 */
static void
kgsl_put_process_private(struct kgsl_device *device,
			 struct kgsl_process_private *private)
{
	struct kgsl_mem_entry *entry = NULL;
	struct kgsl_mem_entry *entry_tmp = NULL;

	if (!private)
		return;

	mutex_lock(&kgsl_driver.process_mutex);

	if (--private->refcnt)
		goto unlock;

	kgsl_process_uninit_sysfs(private);

	list_del(&private->list);

	/* Release any memory entries the process left behind */
	list_for_each_entry_safe(entry, entry_tmp, &private->mem_list, list) {
		list_del(&entry->list);
		kgsl_mem_entry_put(entry);
	}

	kgsl_mmu_putpagetable(private->pagetable);
	kfree(private);
unlock:
	mutex_unlock(&kgsl_driver.process_mutex);
}
605
/*
 * File release handler: destroy every context created through this fd,
 * stop the hardware on the last close, cancel this fd's pending events,
 * and drop the process-private and runtime-PM references.
 */
static int kgsl_release(struct inode *inodep, struct file *filep)
{
	int result = 0;
	struct kgsl_device_private *dev_priv = filep->private_data;
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_device *device = dev_priv->device;
	struct kgsl_context *context;
	int next = 0;

	filep->private_data = NULL;

	mutex_lock(&device->mutex);
	kgsl_check_suspended(device);

	/* Walk the idr and destroy only contexts owned by this fd */
	while (1) {
		context = idr_get_next(&device->context_idr, &next);
		if (context == NULL)
			break;

		if (context->dev_priv == dev_priv) {
			device->ftbl->drawctxt_destroy(device, context);
			kgsl_destroy_context(dev_priv, context);
		}

		next = next + 1;
	}

	/* Last close stops the hardware and resets the state machine */
	device->open_count--;
	if (device->open_count == 0) {
		result = device->ftbl->stop(device);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
	}
	/* clean up any to-be-freed entries that belong to this
	 * process and this device
	 */
	kgsl_cancel_events(device, dev_priv);

	mutex_unlock(&device->mutex);
	kfree(dev_priv);

	kgsl_put_process_private(device, private);

	pm_runtime_put(device->parentdev);
	return result;
}
651
/*
 * File open handler: wake the device via runtime PM, allocate the
 * per-fd private data, attach the per-process private data, and start
 * the hardware on the first open.
 */
static int kgsl_open(struct inode *inodep, struct file *filep)
{
	int result;
	struct kgsl_device_private *dev_priv;
	struct kgsl_device *device;
	unsigned int minor = iminor(inodep);

	device = kgsl_get_minor(minor);
	BUG_ON(device == NULL);

	if (filep->f_flags & O_EXCL) {
		KGSL_DRV_ERR(device, "O_EXCL not allowed\n");
		return -EBUSY;
	}

	result = pm_runtime_get_sync(device->parentdev);
	if (result < 0) {
		KGSL_DRV_ERR(device,
			"Runtime PM: Unable to wake up the device, rc = %d\n",
			result);
		return result;
	}
	result = 0;

	dev_priv = kzalloc(sizeof(struct kgsl_device_private), GFP_KERNEL);
	if (dev_priv == NULL) {
		/* NOTE(review): sizeof() is size_t; %d should be %zu */
		KGSL_DRV_ERR(device, "kzalloc failed(%d)\n",
			sizeof(struct kgsl_device_private));
		result = -ENOMEM;
		goto err_pmruntime;
	}

	dev_priv->device = device;
	filep->private_data = dev_priv;

	/* Get file (per process) private struct */
	dev_priv->process_priv = kgsl_get_process_private(dev_priv);
	if (dev_priv->process_priv ==  NULL) {
		result = -ENOMEM;
		goto err_freedevpriv;
	}

	mutex_lock(&device->mutex);
	kgsl_check_suspended(device);

	/* First open powers up the hardware */
	if (device->open_count == 0) {
		result = device->ftbl->start(device, true);

		if (result) {
			mutex_unlock(&device->mutex);
			goto err_putprocess;
		}
		kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
	}
	device->open_count++;
	mutex_unlock(&device->mutex);

	KGSL_DRV_INFO(device, "Initialized %s: mmu=%s pagetable_count=%d\n",
		device->name, kgsl_mmu_enabled() ? "on" : "off",
		kgsl_pagetable_count);

	return result;

err_putprocess:
	kgsl_put_process_private(device, dev_priv->process_priv);
err_freedevpriv:
	filep->private_data = NULL;
	kfree(dev_priv);
err_pmruntime:
	pm_runtime_put(device->parentdev);
	return result;
}
724
725
726/*call with private->mem_lock locked */
727static struct kgsl_mem_entry *
728kgsl_sharedmem_find(struct kgsl_process_private *private, unsigned int gpuaddr)
729{
730 struct kgsl_mem_entry *entry = NULL, *result = NULL;
731
732 BUG_ON(private == NULL);
733
734 gpuaddr &= PAGE_MASK;
735
736 list_for_each_entry(entry, &private->mem_list, list) {
737 if (entry->memdesc.gpuaddr == gpuaddr) {
738 result = entry;
739 break;
740 }
741 }
742 return result;
743}
744
745/*call with private->mem_lock locked */
746struct kgsl_mem_entry *
747kgsl_sharedmem_find_region(struct kgsl_process_private *private,
748 unsigned int gpuaddr,
749 size_t size)
750{
751 struct kgsl_mem_entry *entry = NULL, *result = NULL;
752
753 BUG_ON(private == NULL);
754
755 list_for_each_entry(entry, &private->mem_list, list) {
Jeremy Gebben16e80fa2011-11-30 15:56:29 -0700756 if (kgsl_gpuaddr_in_memdesc(&entry->memdesc, gpuaddr, size)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700757 result = entry;
758 break;
759 }
760 }
761
762 return result;
763}
764EXPORT_SYMBOL(kgsl_sharedmem_find_region);
765
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700766/*call all ioctl sub functions with driver locked*/
767static long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv,
768 unsigned int cmd, void *data)
769{
770 int result = 0;
771 struct kgsl_device_getproperty *param = data;
772
773 switch (param->type) {
774 case KGSL_PROP_VERSION:
775 {
776 struct kgsl_version version;
777 if (param->sizebytes != sizeof(version)) {
778 result = -EINVAL;
779 break;
780 }
781
782 version.drv_major = KGSL_VERSION_MAJOR;
783 version.drv_minor = KGSL_VERSION_MINOR;
784 version.dev_major = dev_priv->device->ver_major;
785 version.dev_minor = dev_priv->device->ver_minor;
786
787 if (copy_to_user(param->value, &version, sizeof(version)))
788 result = -EFAULT;
789
790 break;
791 }
792 default:
793 result = dev_priv->device->ftbl->getproperty(
794 dev_priv->device, param->type,
795 param->value, param->sizebytes);
796 }
797
798
799 return result;
800}
801
/*
 * IOCTL_KGSL_DEVICE_WAITTIMESTAMP: block until the timestamp retires or
 * the timeout expires. active_cnt is raised for the duration so a
 * concurrent suspend waits for us via suspend_gate.
 */
static long kgsl_ioctl_device_waittimestamp(struct kgsl_device_private
						*dev_priv, unsigned int cmd,
						void *data)
{
	int result = 0;
	struct kgsl_device_waittimestamp *param = data;

	/* Set the active count so that suspend doesn't do the
	   wrong thing */

	dev_priv->device->active_cnt++;

	trace_kgsl_waittimestamp_entry(dev_priv->device, param);

	result = dev_priv->device->ftbl->waittimestamp(dev_priv->device,
					param->timestamp,
					param->timeout);

	trace_kgsl_waittimestamp_exit(dev_priv->device, result);

	/* Fire off any pending suspend operations that are in flight */

	INIT_COMPLETION(dev_priv->device->suspend_gate);
	dev_priv->device->active_cnt--;
	complete(&dev_priv->device->suspend_gate);

	return result;
}
/*
 * Validate a list of indirect buffer descriptors: each must fall inside
 * a memory region owned by the calling process, and (when @parse is set)
 * pass CFF-dump command-stream validation. Returns true if all pass.
 */
static bool check_ibdesc(struct kgsl_device_private *dev_priv,
			 struct kgsl_ibdesc *ibdesc, unsigned int numibs,
			 bool parse)
{
	bool result = true;
	unsigned int i;
	for (i = 0; i < numibs; i++) {
		struct kgsl_mem_entry *entry;
		spin_lock(&dev_priv->process_priv->mem_lock);
		entry = kgsl_sharedmem_find_region(dev_priv->process_priv,
			ibdesc[i].gpuaddr, ibdesc[i].sizedwords * sizeof(uint));
		spin_unlock(&dev_priv->process_priv->mem_lock);
		if (entry == NULL) {
			KGSL_DRV_ERR(dev_priv->device,
				"invalid cmd buffer gpuaddr %08x " \
				"sizedwords %d\n", ibdesc[i].gpuaddr,
				ibdesc[i].sizedwords);
			result = false;
			break;
		}

		if (parse && !kgsl_cffdump_parse_ibs(dev_priv, &entry->memdesc,
			ibdesc[i].gpuaddr, ibdesc[i].sizedwords, true)) {
			KGSL_DRV_ERR(dev_priv->device,
				"invalid cmd buffer gpuaddr %08x " \
				"sizedwords %d numibs %d/%d\n",
				ibdesc[i].gpuaddr,
				ibdesc[i].sizedwords, i+1, numibs);
			result = false;
			break;
		}
	}
	return result;
}
864
/*
 * IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS: validate and submit one or more
 * indirect buffers on a draw context. Supports both the IB-list mode
 * (KGSL_CONTEXT_SUBMIT_IB_LIST) and the legacy single-IB mode, where
 * ibdesc_addr/numibs are reinterpreted as gpuaddr/sizedwords.
 */
static long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
				      unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_ringbuffer_issueibcmds *param = data;
	struct kgsl_ibdesc *ibdesc;
	struct kgsl_context *context;

#ifdef CONFIG_MSM_KGSL_DRM
	kgsl_gpu_mem_flush(DRM_KGSL_GEM_CACHE_OP_TO_DEV);
#endif

	context = kgsl_find_context(dev_priv, param->drawctxt_id);
	if (context == NULL) {
		result = -EINVAL;
		KGSL_DRV_ERR(dev_priv->device,
			"invalid drawctxt drawctxt_id %d\n",
			param->drawctxt_id);
		goto done;
	}

	if (param->flags & KGSL_CONTEXT_SUBMIT_IB_LIST) {
		KGSL_DRV_INFO(dev_priv->device,
			"Using IB list mode for ib submission, numibs: %d\n",
			param->numibs);
		if (!param->numibs) {
			KGSL_DRV_ERR(dev_priv->device,
				"Invalid numibs as parameter: %d\n",
				param->numibs);
			result = -EINVAL;
			goto done;
		}

		ibdesc = kzalloc(sizeof(struct kgsl_ibdesc) * param->numibs,
					GFP_KERNEL);
		if (!ibdesc) {
			KGSL_MEM_ERR(dev_priv->device,
				"kzalloc(%d) failed\n",
				sizeof(struct kgsl_ibdesc) * param->numibs);
			result = -ENOMEM;
			goto done;
		}

		/* Copy the descriptor array in from userspace */
		if (copy_from_user(ibdesc, (void *)param->ibdesc_addr,
				sizeof(struct kgsl_ibdesc) * param->numibs)) {
			result = -EFAULT;
			KGSL_DRV_ERR(dev_priv->device,
				"copy_from_user failed\n");
			goto free_ibdesc;
		}
	} else {
		KGSL_DRV_INFO(dev_priv->device,
			"Using single IB submission mode for ib submission\n");
		/* If user space driver is still using the old mode of
		 * submitting single ib then we need to support that as well */
		ibdesc = kzalloc(sizeof(struct kgsl_ibdesc), GFP_KERNEL);
		if (!ibdesc) {
			KGSL_MEM_ERR(dev_priv->device,
				"kzalloc(%d) failed\n",
				sizeof(struct kgsl_ibdesc));
			result = -ENOMEM;
			goto done;
		}
		/* Legacy mode: ibdesc_addr is the IB itself */
		ibdesc[0].gpuaddr = param->ibdesc_addr;
		ibdesc[0].sizedwords = param->numibs;
		param->numibs = 1;
	}

	if (!check_ibdesc(dev_priv, ibdesc, param->numibs, true)) {
		KGSL_DRV_ERR(dev_priv->device, "bad ibdesc");
		result = -EINVAL;
		goto free_ibdesc;
	}

	result = dev_priv->device->ftbl->issueibcmds(dev_priv,
					     context,
					     ibdesc,
					     param->numibs,
					     &param->timestamp,
					     param->flags);

	trace_kgsl_issueibcmds(dev_priv->device, param, result);

	if (result != 0)
		goto free_ibdesc;

	/* this is a check to try to detect if a command buffer was freed
	 * during issueibcmds().
	 */
	if (!check_ibdesc(dev_priv, ibdesc, param->numibs, false)) {
		KGSL_DRV_ERR(dev_priv->device, "bad ibdesc AFTER issue");
		result = -EINVAL;
		goto free_ibdesc;
	}

free_ibdesc:
	kfree(ibdesc);
done:

#ifdef CONFIG_MSM_KGSL_DRM
	kgsl_gpu_mem_flush(DRM_KGSL_GEM_CACHE_OP_FROM_DEV);
#endif

	return result;
}
970
971static long kgsl_ioctl_cmdstream_readtimestamp(struct kgsl_device_private
972 *dev_priv, unsigned int cmd,
973 void *data)
974{
975 struct kgsl_cmdstream_readtimestamp *param = data;
976
977 param->timestamp =
978 dev_priv->device->ftbl->readtimestamp(dev_priv->device,
979 param->type);
980
Norman Geed7402ff2011-10-28 08:51:11 -0600981 trace_kgsl_readtimestamp(dev_priv->device, param);
982
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700983 return 0;
984}
985
Jeremy Gebbenc81a3c62012-02-07 16:10:23 -0700986static void kgsl_freemem_event_cb(struct kgsl_device *device,
987 void *priv, u32 timestamp)
988{
989 struct kgsl_mem_entry *entry = priv;
990 spin_lock(&entry->priv->mem_lock);
991 list_del(&entry->list);
992 spin_unlock(&entry->priv->mem_lock);
993 kgsl_mem_entry_put(entry);
994}
995
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700996static long kgsl_ioctl_cmdstream_freememontimestamp(struct kgsl_device_private
997 *dev_priv, unsigned int cmd,
998 void *data)
999{
1000 int result = 0;
1001 struct kgsl_cmdstream_freememontimestamp *param = data;
1002 struct kgsl_mem_entry *entry = NULL;
1003
1004 spin_lock(&dev_priv->process_priv->mem_lock);
1005 entry = kgsl_sharedmem_find(dev_priv->process_priv, param->gpuaddr);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001006 spin_unlock(&dev_priv->process_priv->mem_lock);
1007
1008 if (entry) {
Jeremy Gebbenc81a3c62012-02-07 16:10:23 -07001009 result = kgsl_add_event(dev_priv->device, param->timestamp,
1010 kgsl_freemem_event_cb, entry, dev_priv);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001011 } else {
1012 KGSL_DRV_ERR(dev_priv->device,
1013 "invalid gpuaddr %08x\n", param->gpuaddr);
1014 result = -EINVAL;
1015 }
1016
1017 return result;
1018}
1019
/*
 * Create a draw context for the calling client and return its id to
 * userspace.  The device-specific drawctxt_create hook (if present)
 * performs the hardware-level setup; on any failure the partially
 * created context is destroyed again before returning.
 */
static long kgsl_ioctl_drawctxt_create(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_drawctxt_create *param = data;
	struct kgsl_context *context = NULL;

	context = kgsl_create_context(dev_priv);

	if (context == NULL) {
		result = -ENOMEM;
		goto done;
	}

	if (dev_priv->device->ftbl->drawctxt_create)
		result = dev_priv->device->ftbl->drawctxt_create(
			dev_priv->device, dev_priv->process_priv->pagetable,
			context, param->flags);

	/* written even on failure, but the ioctl layer only copies the
	 * parameter block back to userspace when the result is 0 */
	param->drawctxt_id = context->id;

done:
	if (result && context)
		kgsl_destroy_context(dev_priv, context);

	return result;
}
1047
1048static long kgsl_ioctl_drawctxt_destroy(struct kgsl_device_private *dev_priv,
1049 unsigned int cmd, void *data)
1050{
1051 int result = 0;
1052 struct kgsl_drawctxt_destroy *param = data;
1053 struct kgsl_context *context;
1054
1055 context = kgsl_find_context(dev_priv, param->drawctxt_id);
1056
1057 if (context == NULL) {
1058 result = -EINVAL;
1059 goto done;
1060 }
1061
1062 if (dev_priv->device->ftbl->drawctxt_destroy)
1063 dev_priv->device->ftbl->drawctxt_destroy(dev_priv->device,
1064 context);
1065
1066 kgsl_destroy_context(dev_priv, context);
1067
1068done:
1069 return result;
1070}
1071
/*
 * Free a shared memory allocation identified by its GPU address.  The
 * entry is unlinked from the process list while holding mem_lock; the
 * reference is dropped afterwards outside the lock.
 */
static long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_sharedmem_free *param = data;
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_mem_entry *entry = NULL;

	spin_lock(&private->mem_lock);
	entry = kgsl_sharedmem_find(private, param->gpuaddr);
	if (entry)
		list_del(&entry->list);
	spin_unlock(&private->mem_lock);

	if (entry) {
		/* may free the entry if this was the last reference */
		kgsl_mem_entry_put(entry);
	} else {
		KGSL_CORE_ERR("invalid gpuaddr %08x\n", param->gpuaddr);
		result = -EINVAL;
	}

	return result;
}
1095
1096static struct vm_area_struct *kgsl_get_vma_from_start_addr(unsigned int addr)
1097{
1098 struct vm_area_struct *vma;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001099
1100 down_read(&current->mm->mmap_sem);
1101 vma = find_vma(current->mm, addr);
1102 up_read(&current->mm->mmap_sem);
Jordan Crouse2c542b62011-07-26 08:30:20 -06001103 if (!vma)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001104 KGSL_CORE_ERR("find_vma(%x) failed\n", addr);
Jordan Crouse2c542b62011-07-26 08:30:20 -06001105
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001106 return vma;
1107}
1108
/*
 * Allocate vmalloc-backed GPU memory and map it into the caller's
 * address space at a VMA the caller has already created (param->hostptr
 * must be the start of that mmap'ed region).  On success the GPU
 * address of the allocation is returned in param->gpuaddr.
 *
 * Note the unusual parameter reuse: on entry param->gpuaddr optionally
 * carries the requested length; 0 means "infer from the VMA".
 */
static long
kgsl_ioctl_sharedmem_from_vmalloc(struct kgsl_device_private *dev_priv,
				  unsigned int cmd, void *data)
{
	int result = 0, len = 0;
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_sharedmem_from_vmalloc *param = data;
	struct kgsl_mem_entry *entry = NULL;
	struct vm_area_struct *vma;

	/* vmalloc-backed buffers are only usable with a GPU MMU */
	if (!kgsl_mmu_enabled())
		return -ENODEV;

	if (!param->hostptr) {
		KGSL_CORE_ERR("invalid hostptr %x\n", param->hostptr);
		result = -EINVAL;
		goto error;
	}

	vma = kgsl_get_vma_from_start_addr(param->hostptr);
	if (!vma) {
		result = -EINVAL;
		goto error;
	}

	/*
	 * If the user specified a length, use it, otherwise try to
	 * infer the length if the vma region
	 */
	if (param->gpuaddr != 0) {
		len = param->gpuaddr;
	} else {
		/*
		 * For this to work, we have to assume the VMA region is only
		 * for this single allocation. If it isn't, then bail out
		 */
		if (vma->vm_pgoff || (param->hostptr != vma->vm_start)) {
			KGSL_CORE_ERR("VMA region does not match hostaddr\n");
			result = -EINVAL;
			goto error;
		}

		len = vma->vm_end - vma->vm_start;
	}

	/* Make sure it fits */
	/* NOTE(review): hostptr + len could wrap for very large len -
	 * verify len is bounded before trusting this check */
	if (len == 0 || param->hostptr + len > vma->vm_end) {
		KGSL_CORE_ERR("Invalid memory allocation length %d\n", len);
		result = -EINVAL;
		goto error;
	}

	entry = kgsl_mem_entry_create();
	if (entry == NULL) {
		result = -ENOMEM;
		goto error;
	}

	result = kgsl_sharedmem_vmalloc_user(&entry->memdesc,
					     private->pagetable, len,
					     param->flags);
	if (result != 0)
		goto error_free_entry;

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	/* map the freshly allocated pages into the caller's VMA */
	result = remap_vmalloc_range(vma, (void *) entry->memdesc.hostptr, 0);
	if (result) {
		KGSL_CORE_ERR("remap_vmalloc_range failed: %d\n", result);
		goto error_free_vmalloc;
	}

	param->gpuaddr = entry->memdesc.gpuaddr;

	entry->memtype = KGSL_MEM_ENTRY_KERNEL;

	kgsl_mem_entry_attach_process(entry, private);

	/* Process specific statistics */
	kgsl_process_add_stats(private, entry->memtype, len);

	kgsl_check_idle(dev_priv->device);
	return 0;

error_free_vmalloc:
	kgsl_sharedmem_free(&entry->memdesc);

error_free_entry:
	kfree(entry);

error:
	kgsl_check_idle(dev_priv->device);
	return result;
}
1203
/*
 * Return nonzero when the region [start, start + size) extends beyond
 * len bytes.  The end is computed in 64 bits so start + size cannot
 * wrap on 32-bit platforms.
 */
static inline int _check_region(unsigned long start, unsigned long size,
				uint64_t len)
{
	uint64_t last = (uint64_t) start + (uint64_t) size;

	return (last > len) ? 1 : 0;
}
1210
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001211static int kgsl_get_phys_file(int fd, unsigned long *start, unsigned long *len,
1212 unsigned long *vstart, struct file **filep)
1213{
1214 struct file *fbfile;
1215 int ret = 0;
1216 dev_t rdev;
1217 struct fb_info *info;
1218
1219 *filep = NULL;
Jordan Crousefd978432011-09-02 14:34:32 -06001220#ifdef CONFIG_ANDROID_PMEM
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001221 if (!get_pmem_file(fd, start, vstart, len, filep))
1222 return 0;
Jordan Crousefd978432011-09-02 14:34:32 -06001223#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001224
1225 fbfile = fget(fd);
1226 if (fbfile == NULL) {
1227 KGSL_CORE_ERR("fget_light failed\n");
1228 return -1;
1229 }
1230
1231 rdev = fbfile->f_dentry->d_inode->i_rdev;
1232 info = MAJOR(rdev) == FB_MAJOR ? registered_fb[MINOR(rdev)] : NULL;
1233 if (info) {
1234 *start = info->fix.smem_start;
1235 *len = info->fix.smem_len;
1236 *vstart = (unsigned long)__va(info->fix.smem_start);
1237 ret = 0;
1238 } else {
1239 KGSL_CORE_ERR("framebuffer minor %d not found\n",
1240 MINOR(rdev));
1241 ret = -1;
1242 }
1243
1244 fput(fbfile);
1245
1246 return ret;
1247}
1248
1249static int kgsl_setup_phys_file(struct kgsl_mem_entry *entry,
1250 struct kgsl_pagetable *pagetable,
1251 unsigned int fd, unsigned int offset,
1252 size_t size)
1253{
1254 int ret;
1255 unsigned long phys, virt, len;
1256 struct file *filep;
1257
1258 ret = kgsl_get_phys_file(fd, &phys, &len, &virt, &filep);
1259 if (ret)
1260 return ret;
1261
Wei Zou4061c0b2011-07-08 10:24:22 -07001262 if (phys == 0) {
1263 ret = -EINVAL;
1264 goto err;
1265 }
1266
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001267 if (offset >= len) {
1268 ret = -EINVAL;
1269 goto err;
1270 }
1271
1272 if (size == 0)
1273 size = len;
1274
1275 /* Adjust the size of the region to account for the offset */
1276 size += offset & ~PAGE_MASK;
1277
1278 size = ALIGN(size, PAGE_SIZE);
1279
1280 if (_check_region(offset & PAGE_MASK, size, len)) {
1281 KGSL_CORE_ERR("Offset (%ld) + size (%d) is larger"
1282 "than pmem region length %ld\n",
1283 offset & PAGE_MASK, size, len);
1284 ret = -EINVAL;
1285 goto err;
1286
1287 }
1288
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001289 entry->priv_data = filep;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001290
1291 entry->memdesc.pagetable = pagetable;
1292 entry->memdesc.size = size;
1293 entry->memdesc.physaddr = phys + (offset & PAGE_MASK);
1294 entry->memdesc.hostptr = (void *) (virt + (offset & PAGE_MASK));
Jordan Croused17e9aa2011-10-12 16:57:48 -06001295
1296 ret = memdesc_sg_phys(&entry->memdesc,
1297 phys + (offset & PAGE_MASK), size);
1298 if (ret)
1299 goto err;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001300
1301 return 0;
1302err:
Jordan Crousefd978432011-09-02 14:34:32 -06001303#ifdef CONFIG_ANDROID_PMEM
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001304 put_pmem_file(filep);
Jordan Crousefd978432011-09-02 14:34:32 -06001305#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001306 return ret;
1307}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001308
Jordan Croused17e9aa2011-10-12 16:57:48 -06001309static int memdesc_sg_virt(struct kgsl_memdesc *memdesc,
1310 void *addr, int size)
1311{
1312 int i;
1313 int sglen = PAGE_ALIGN(size) / PAGE_SIZE;
1314 unsigned long paddr = (unsigned long) addr;
1315
Jeff Boody28afec42012-01-18 15:47:46 -07001316 memdesc->sg = vmalloc(sglen * sizeof(struct scatterlist));
Jordan Croused17e9aa2011-10-12 16:57:48 -06001317 if (memdesc->sg == NULL)
1318 return -ENOMEM;
1319
1320 memdesc->sglen = sglen;
1321 sg_init_table(memdesc->sg, sglen);
1322
1323 spin_lock(&current->mm->page_table_lock);
1324
1325 for (i = 0; i < sglen; i++, paddr += PAGE_SIZE) {
1326 struct page *page;
1327 pmd_t *ppmd;
1328 pte_t *ppte;
1329 pgd_t *ppgd = pgd_offset(current->mm, paddr);
1330
1331 if (pgd_none(*ppgd) || pgd_bad(*ppgd))
1332 goto err;
1333
1334 ppmd = pmd_offset(ppgd, paddr);
1335 if (pmd_none(*ppmd) || pmd_bad(*ppmd))
1336 goto err;
1337
1338 ppte = pte_offset_map(ppmd, paddr);
1339 if (ppte == NULL)
1340 goto err;
1341
1342 page = pfn_to_page(pte_pfn(*ppte));
1343 if (!page)
1344 goto err;
1345
1346 sg_set_page(&memdesc->sg[i], page, PAGE_SIZE, 0);
1347 pte_unmap(ppte);
1348 }
1349
1350 spin_unlock(&current->mm->page_table_lock);
1351
1352 return 0;
1353
1354err:
1355 spin_unlock(&current->mm->page_table_lock);
Jeff Boody28afec42012-01-18 15:47:46 -07001356 vfree(memdesc->sg);
Jordan Croused17e9aa2011-10-12 16:57:48 -06001357 memdesc->sg = NULL;
1358
1359 return -EINVAL;
1360}
1361
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001362static int kgsl_setup_hostptr(struct kgsl_mem_entry *entry,
1363 struct kgsl_pagetable *pagetable,
1364 void *hostptr, unsigned int offset,
1365 size_t size)
1366{
1367 struct vm_area_struct *vma;
1368 unsigned int len;
1369
1370 down_read(&current->mm->mmap_sem);
1371 vma = find_vma(current->mm, (unsigned int) hostptr);
1372 up_read(&current->mm->mmap_sem);
1373
1374 if (!vma) {
1375 KGSL_CORE_ERR("find_vma(%p) failed\n", hostptr);
1376 return -EINVAL;
1377 }
1378
1379 /* We don't necessarily start at vma->vm_start */
1380 len = vma->vm_end - (unsigned long) hostptr;
1381
1382 if (offset >= len)
1383 return -EINVAL;
1384
1385 if (!KGSL_IS_PAGE_ALIGNED((unsigned long) hostptr) ||
1386 !KGSL_IS_PAGE_ALIGNED(len)) {
1387 KGSL_CORE_ERR("user address len(%u)"
1388 "and start(%p) must be page"
1389 "aligned\n", len, hostptr);
1390 return -EINVAL;
1391 }
1392
1393 if (size == 0)
1394 size = len;
1395
1396 /* Adjust the size of the region to account for the offset */
1397 size += offset & ~PAGE_MASK;
1398
1399 size = ALIGN(size, PAGE_SIZE);
1400
1401 if (_check_region(offset & PAGE_MASK, size, len)) {
1402 KGSL_CORE_ERR("Offset (%ld) + size (%d) is larger"
1403 "than region length %d\n",
1404 offset & PAGE_MASK, size, len);
1405 return -EINVAL;
1406 }
1407
1408 entry->memdesc.pagetable = pagetable;
1409 entry->memdesc.size = size;
1410 entry->memdesc.hostptr = hostptr + (offset & PAGE_MASK);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001411
Jordan Croused17e9aa2011-10-12 16:57:48 -06001412 return memdesc_sg_virt(&entry->memdesc,
1413 hostptr + (offset & PAGE_MASK), size);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001414}
1415
#ifdef CONFIG_ASHMEM
/*
 * Set up a mem entry backed by an ashmem region.  hostptr must be the
 * exact start of the VMA that maps the ashmem fd, and size (when
 * nonzero) must equal the VMA's length.  On success entry->priv_data
 * holds the ashmem file reference, released again on error.
 */
static int kgsl_setup_ashmem(struct kgsl_mem_entry *entry,
			     struct kgsl_pagetable *pagetable,
			     int fd, void *hostptr, size_t size)
{
	int ret;
	struct vm_area_struct *vma;
	struct file *filep, *vmfile;
	unsigned long len;
	unsigned int hostaddr = (unsigned int) hostptr;

	vma = kgsl_get_vma_from_start_addr(hostaddr);
	if (vma == NULL)
		return -EINVAL;

	/* the VMA must map the region from its very beginning */
	if (vma->vm_pgoff || vma->vm_start != hostaddr) {
		KGSL_CORE_ERR("Invalid vma region\n");
		return -EINVAL;
	}

	len = vma->vm_end - vma->vm_start;

	if (size == 0)
		size = len;

	if (size != len) {
		KGSL_CORE_ERR("Invalid size %d for vma region %p\n",
			      size, hostptr);
		return -EINVAL;
	}

	/* takes a reference on the backing file; released via
	 * put_ashmem_file() on the error path */
	ret = get_ashmem_file(fd, &filep, &vmfile, &len);

	if (ret) {
		KGSL_CORE_ERR("get_ashmem_file failed\n");
		return ret;
	}

	/* the fd and the VMA must refer to the same ashmem object */
	if (vmfile != vma->vm_file) {
		KGSL_CORE_ERR("ashmem shmem file does not match vma\n");
		ret = -EINVAL;
		goto err;
	}

	entry->priv_data = filep;
	entry->memdesc.pagetable = pagetable;
	entry->memdesc.size = ALIGN(size, PAGE_SIZE);
	entry->memdesc.hostptr = hostptr;

	ret = memdesc_sg_virt(&entry->memdesc, hostptr, size);
	if (ret)
		goto err;

	return 0;

err:
	put_ashmem_file(filep);
	return ret;
}
#else
/* Stub when ashmem support is not compiled in */
static int kgsl_setup_ashmem(struct kgsl_mem_entry *entry,
			     struct kgsl_pagetable *pagetable,
			     int fd, void *hostptr, size_t size)
{
	return -EINVAL;
}
#endif
1483
Jordan Crouse8eab35a2011-10-12 16:57:48 -06001484static int kgsl_setup_ion(struct kgsl_mem_entry *entry,
1485 struct kgsl_pagetable *pagetable, int fd)
1486{
1487 struct ion_handle *handle;
1488 struct scatterlist *s;
1489 unsigned long flags;
1490
1491 if (kgsl_ion_client == NULL) {
1492 kgsl_ion_client = msm_ion_client_create(UINT_MAX, KGSL_NAME);
1493 if (kgsl_ion_client == NULL)
1494 return -ENODEV;
1495 }
1496
1497 handle = ion_import_fd(kgsl_ion_client, fd);
1498 if (IS_ERR_OR_NULL(handle))
1499 return PTR_ERR(handle);
1500
1501 entry->memtype = KGSL_MEM_ENTRY_ION;
1502 entry->priv_data = handle;
1503 entry->memdesc.pagetable = pagetable;
1504 entry->memdesc.size = 0;
1505
1506 if (ion_handle_get_flags(kgsl_ion_client, handle, &flags))
1507 goto err;
1508
1509 entry->memdesc.sg = ion_map_dma(kgsl_ion_client, handle, flags);
1510
1511 if (IS_ERR_OR_NULL(entry->memdesc.sg))
1512 goto err;
1513
1514 /* Calculate the size of the memdesc from the sglist */
1515
1516 entry->memdesc.sglen = 0;
1517
1518 for (s = entry->memdesc.sg; s != NULL; s = sg_next(s)) {
1519 entry->memdesc.size += s->length;
1520 entry->memdesc.sglen++;
1521 }
1522
1523 return 0;
1524err:
1525 ion_free(kgsl_ion_client, handle);
1526 return -ENOMEM;
1527}
1528
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001529static long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
1530 unsigned int cmd, void *data)
1531{
1532 int result = -EINVAL;
1533 struct kgsl_map_user_mem *param = data;
1534 struct kgsl_mem_entry *entry = NULL;
1535 struct kgsl_process_private *private = dev_priv->process_priv;
Jason848741a2011-07-12 10:24:25 -07001536 enum kgsl_user_mem_type memtype;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001537
1538 entry = kgsl_mem_entry_create();
1539
1540 if (entry == NULL)
1541 return -ENOMEM;
1542
Jason848741a2011-07-12 10:24:25 -07001543 if (_IOC_SIZE(cmd) == sizeof(struct kgsl_sharedmem_from_pmem))
1544 memtype = KGSL_USER_MEM_TYPE_PMEM;
1545 else
1546 memtype = param->memtype;
1547
1548 switch (memtype) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001549 case KGSL_USER_MEM_TYPE_PMEM:
1550 if (param->fd == 0 || param->len == 0)
1551 break;
1552
1553 result = kgsl_setup_phys_file(entry, private->pagetable,
1554 param->fd, param->offset,
1555 param->len);
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001556 entry->memtype = KGSL_MEM_ENTRY_PMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001557 break;
1558
1559 case KGSL_USER_MEM_TYPE_ADDR:
1560 if (!kgsl_mmu_enabled()) {
1561 KGSL_DRV_ERR(dev_priv->device,
1562 "Cannot map paged memory with the "
1563 "MMU disabled\n");
1564 break;
1565 }
1566
1567 if (param->hostptr == 0)
1568 break;
1569
1570 result = kgsl_setup_hostptr(entry, private->pagetable,
1571 (void *) param->hostptr,
1572 param->offset, param->len);
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001573 entry->memtype = KGSL_MEM_ENTRY_USER;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001574 break;
1575
1576 case KGSL_USER_MEM_TYPE_ASHMEM:
1577 if (!kgsl_mmu_enabled()) {
1578 KGSL_DRV_ERR(dev_priv->device,
1579 "Cannot map paged memory with the "
1580 "MMU disabled\n");
1581 break;
1582 }
1583
1584 if (param->hostptr == 0)
1585 break;
1586
1587 result = kgsl_setup_ashmem(entry, private->pagetable,
1588 param->fd, (void *) param->hostptr,
1589 param->len);
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001590
1591 entry->memtype = KGSL_MEM_ENTRY_ASHMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001592 break;
Jordan Crouse8eab35a2011-10-12 16:57:48 -06001593 case KGSL_USER_MEM_TYPE_ION:
1594 result = kgsl_setup_ion(entry, private->pagetable,
1595 param->fd);
1596 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001597 default:
Jason848741a2011-07-12 10:24:25 -07001598 KGSL_CORE_ERR("Invalid memory type: %x\n", memtype);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001599 break;
1600 }
1601
1602 if (result)
1603 goto error;
1604
1605 result = kgsl_mmu_map(private->pagetable,
1606 &entry->memdesc,
1607 GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
1608
1609 if (result)
1610 goto error_put_file_ptr;
1611
1612 /* Adjust the returned value for a non 4k aligned offset */
1613 param->gpuaddr = entry->memdesc.gpuaddr + (param->offset & ~PAGE_MASK);
1614
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001615 KGSL_STATS_ADD(param->len, kgsl_driver.stats.mapped,
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001616 kgsl_driver.stats.mapped_max);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001617
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001618 kgsl_process_add_stats(private, entry->memtype, param->len);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001619
1620 kgsl_mem_entry_attach_process(entry, private);
1621
1622 kgsl_check_idle(dev_priv->device);
1623 return result;
1624
1625 error_put_file_ptr:
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001626 if (entry->priv_data)
1627 fput(entry->priv_data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001628
1629error:
1630 kfree(entry);
1631 kgsl_check_idle(dev_priv->device);
1632 return result;
1633}
1634
/*
 * This function flushes (cleans/writes back) a graphics memory
 * allocation from the CPU cache when caching is enabled with the MMU.
 * The lookup and the cache operation both run under the process
 * mem_lock so the entry cannot disappear mid-operation.
 */
static long
kgsl_ioctl_sharedmem_flush_cache(struct kgsl_device_private *dev_priv,
				 unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_mem_entry *entry;
	struct kgsl_sharedmem_free *param = data;
	struct kgsl_process_private *private = dev_priv->process_priv;

	spin_lock(&private->mem_lock);
	entry = kgsl_sharedmem_find(private, param->gpuaddr);
	if (!entry) {
		KGSL_CORE_ERR("invalid gpuaddr %08x\n", param->gpuaddr);
		result = -EINVAL;
		goto done;
	}
	if (!entry->memdesc.hostptr) {
		KGSL_CORE_ERR("invalid hostptr with gpuaddr %08x\n",
			param->gpuaddr);
		/* NOTE(review): result stays 0 on this path, so the error
		 * is logged but userspace sees success - confirm intended */
		goto done;
	}

	kgsl_cache_range_op(&entry->memdesc, KGSL_CACHE_OP_CLEAN);
done:
	spin_unlock(&private->mem_lock);
	return result;
}
1664
1665static long
1666kgsl_ioctl_gpumem_alloc(struct kgsl_device_private *dev_priv,
1667 unsigned int cmd, void *data)
1668{
1669 struct kgsl_process_private *private = dev_priv->process_priv;
1670 struct kgsl_gpumem_alloc *param = data;
1671 struct kgsl_mem_entry *entry;
1672 int result;
1673
1674 entry = kgsl_mem_entry_create();
1675 if (entry == NULL)
1676 return -ENOMEM;
1677
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001678 result = kgsl_allocate_user(&entry->memdesc, private->pagetable,
1679 param->size, param->flags);
1680
1681 if (result == 0) {
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001682 entry->memtype = KGSL_MEM_ENTRY_KERNEL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001683 kgsl_mem_entry_attach_process(entry, private);
1684 param->gpuaddr = entry->memdesc.gpuaddr;
1685
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001686 kgsl_process_add_stats(private, entry->memtype, param->size);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001687 } else
1688 kfree(entry);
1689
1690 kgsl_check_idle(dev_priv->device);
1691 return result;
1692}
Jeremy Gebbena7423e42011-04-18 15:11:21 -06001693static long kgsl_ioctl_cff_syncmem(struct kgsl_device_private *dev_priv,
1694 unsigned int cmd, void *data)
1695{
1696 int result = 0;
1697 struct kgsl_cff_syncmem *param = data;
1698 struct kgsl_process_private *private = dev_priv->process_priv;
1699 struct kgsl_mem_entry *entry = NULL;
1700
1701 spin_lock(&private->mem_lock);
1702 entry = kgsl_sharedmem_find_region(private, param->gpuaddr, param->len);
1703 if (entry)
1704 kgsl_cffdump_syncmem(dev_priv, &entry->memdesc, param->gpuaddr,
1705 param->len, true);
1706 else
1707 result = -EINVAL;
1708 spin_unlock(&private->mem_lock);
1709 return result;
1710}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001711
Sushmita Susheelendra41f8fa32011-05-11 17:15:58 -06001712static long kgsl_ioctl_cff_user_event(struct kgsl_device_private *dev_priv,
1713 unsigned int cmd, void *data)
1714{
1715 int result = 0;
1716 struct kgsl_cff_user_event *param = data;
1717
1718 kgsl_cffdump_user_event(param->cff_opcode, param->op1, param->op2,
1719 param->op3, param->op4, param->op5);
1720
1721 return result;
1722}
1723
Jordan Croused4bc9d22011-11-17 13:39:21 -07001724#ifdef CONFIG_GENLOCK
1725struct kgsl_genlock_event_priv {
1726 struct genlock_handle *handle;
1727 struct genlock *lock;
1728};
1729
1730/**
1731 * kgsl_genlock_event_cb - Event callback for a genlock timestamp event
1732 * @device - The KGSL device that expired the timestamp
1733 * @priv - private data for the event
1734 * @timestamp - the timestamp that triggered the event
1735 *
1736 * Release a genlock lock following the expiration of a timestamp
1737 */
1738
1739static void kgsl_genlock_event_cb(struct kgsl_device *device,
1740 void *priv, u32 timestamp)
1741{
1742 struct kgsl_genlock_event_priv *ev = priv;
1743 int ret;
1744
1745 ret = genlock_lock(ev->handle, GENLOCK_UNLOCK, 0, 0);
1746 if (ret)
1747 KGSL_CORE_ERR("Error while unlocking genlock: %d\n", ret);
1748
1749 genlock_put_handle(ev->handle);
1750
1751 kfree(ev);
1752}
1753
1754/**
1755 * kgsl_add_genlock-event - Create a new genlock event
1756 * @device - KGSL device to create the event on
1757 * @timestamp - Timestamp to trigger the event
1758 * @data - User space buffer containing struct kgsl_genlock_event_priv
1759 * @len - length of the userspace buffer
Jeremy Gebbenfd87f9a2012-02-10 07:06:09 -07001760 * @owner - driver instance that owns this event
Jordan Croused4bc9d22011-11-17 13:39:21 -07001761 * @returns 0 on success or error code on error
1762 *
1763 * Attack to a genlock handle and register an event to release the
1764 * genlock lock when the timestamp expires
1765 */
1766
1767static int kgsl_add_genlock_event(struct kgsl_device *device,
Jeremy Gebbenfd87f9a2012-02-10 07:06:09 -07001768 u32 timestamp, void __user *data, int len,
1769 struct kgsl_device_private *owner)
Jordan Croused4bc9d22011-11-17 13:39:21 -07001770{
1771 struct kgsl_genlock_event_priv *event;
1772 struct kgsl_timestamp_event_genlock priv;
1773 int ret;
1774
1775 if (len != sizeof(priv))
1776 return -EINVAL;
1777
1778 if (copy_from_user(&priv, data, sizeof(priv)))
1779 return -EFAULT;
1780
1781 event = kzalloc(sizeof(*event), GFP_KERNEL);
1782
1783 if (event == NULL)
1784 return -ENOMEM;
1785
1786 event->handle = genlock_get_handle_fd(priv.handle);
1787
1788 if (IS_ERR(event->handle)) {
1789 int ret = PTR_ERR(event->handle);
1790 kfree(event);
1791 return ret;
1792 }
1793
Jeremy Gebbenfd87f9a2012-02-10 07:06:09 -07001794 ret = kgsl_add_event(device, timestamp, kgsl_genlock_event_cb, event,
1795 owner);
Jordan Croused4bc9d22011-11-17 13:39:21 -07001796 if (ret)
1797 kfree(event);
1798
1799 return ret;
1800}
1801#else
1802static long kgsl_add_genlock_event(struct kgsl_device *device,
Jeremy Gebbenfd87f9a2012-02-10 07:06:09 -07001803 u32 timestamp, void __user *data, int len,
1804 struct kgsl_device_private *owner)
Jordan Croused4bc9d22011-11-17 13:39:21 -07001805{
1806 return -EINVAL;
1807}
1808#endif
1809
1810/**
1811 * kgsl_ioctl_timestamp_event - Register a new timestamp event from userspace
1812 * @dev_priv - pointer to the private device structure
1813 * @cmd - the ioctl cmd passed from kgsl_ioctl
1814 * @data - the user data buffer from kgsl_ioctl
1815 * @returns 0 on success or error code on failure
1816 */
1817
1818static long kgsl_ioctl_timestamp_event(struct kgsl_device_private *dev_priv,
1819 unsigned int cmd, void *data)
1820{
1821 struct kgsl_timestamp_event *param = data;
1822 int ret;
1823
1824 switch (param->type) {
1825 case KGSL_TIMESTAMP_EVENT_GENLOCK:
1826 ret = kgsl_add_genlock_event(dev_priv->device,
Jeremy Gebbenfd87f9a2012-02-10 07:06:09 -07001827 param->timestamp, param->priv, param->len,
1828 dev_priv);
Jordan Croused4bc9d22011-11-17 13:39:21 -07001829 break;
1830 default:
1831 ret = -EINVAL;
1832 }
1833
1834 return ret;
1835}
1836
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001837typedef long (*kgsl_ioctl_func_t)(struct kgsl_device_private *,
1838 unsigned int, void *);
1839
1840#define KGSL_IOCTL_FUNC(_cmd, _func, _lock) \
1841 [_IOC_NR(_cmd)] = { .cmd = _cmd, .func = _func, .lock = _lock }
1842
1843static const struct {
1844 unsigned int cmd;
1845 kgsl_ioctl_func_t func;
1846 int lock;
1847} kgsl_ioctl_funcs[] = {
1848 KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_GETPROPERTY,
1849 kgsl_ioctl_device_getproperty, 1),
1850 KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP,
1851 kgsl_ioctl_device_waittimestamp, 1),
1852 KGSL_IOCTL_FUNC(IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS,
1853 kgsl_ioctl_rb_issueibcmds, 1),
1854 KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP,
1855 kgsl_ioctl_cmdstream_readtimestamp, 1),
1856 KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP,
1857 kgsl_ioctl_cmdstream_freememontimestamp, 1),
1858 KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_CREATE,
1859 kgsl_ioctl_drawctxt_create, 1),
1860 KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_DESTROY,
1861 kgsl_ioctl_drawctxt_destroy, 1),
1862 KGSL_IOCTL_FUNC(IOCTL_KGSL_MAP_USER_MEM,
1863 kgsl_ioctl_map_user_mem, 0),
1864 KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FROM_PMEM,
1865 kgsl_ioctl_map_user_mem, 0),
1866 KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FREE,
1867 kgsl_ioctl_sharedmem_free, 0),
1868 KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC,
1869 kgsl_ioctl_sharedmem_from_vmalloc, 0),
1870 KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE,
1871 kgsl_ioctl_sharedmem_flush_cache, 0),
1872 KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC,
1873 kgsl_ioctl_gpumem_alloc, 0),
Jeremy Gebbena7423e42011-04-18 15:11:21 -06001874 KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_SYNCMEM,
1875 kgsl_ioctl_cff_syncmem, 0),
Sushmita Susheelendra41f8fa32011-05-11 17:15:58 -06001876 KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_USER_EVENT,
1877 kgsl_ioctl_cff_user_event, 0),
Jordan Croused4bc9d22011-11-17 13:39:21 -07001878 KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMESTAMP_EVENT,
Lucille Sylvester9329cf02011-12-02 14:30:41 -07001879 kgsl_ioctl_timestamp_event, 1),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001880};
1881
/*
 * Top-level ioctl dispatcher.  Copies the user parameter block into a
 * kernel buffer (a small stack buffer for common sizes, kzalloc for
 * larger ones), dispatches to the table handler or the device-specific
 * fallback - optionally under the device mutex - and copies the
 * parameter block back on success.
 */
static long kgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kgsl_device_private *dev_priv = filep->private_data;
	unsigned int nr = _IOC_NR(cmd);
	kgsl_ioctl_func_t func;
	int lock, ret;
	char ustack[64];
	void *uptr = NULL;

	BUG_ON(dev_priv == NULL);

	/* Workaround for an previously incorrectly defined ioctl code.
	   This helps ensure binary compatability */

	if (cmd == IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD)
		cmd = IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP;
	else if (cmd == IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD)
		cmd = IOCTL_KGSL_CMDSTREAM_READTIMESTAMP;

	if (cmd & (IOC_IN | IOC_OUT)) {
		/* small parameter blocks live on the stack; anything of
		 * sizeof(ustack) or more is heap allocated (and freed at
		 * done: under the mirrored size check) */
		if (_IOC_SIZE(cmd) < sizeof(ustack))
			uptr = ustack;
		else {
			uptr = kzalloc(_IOC_SIZE(cmd), GFP_KERNEL);
			if (uptr == NULL) {
				KGSL_MEM_ERR(dev_priv->device,
					"kzalloc(%d) failed\n", _IOC_SIZE(cmd));
				ret = -ENOMEM;
				goto done;
			}
		}

		if (cmd & IOC_IN) {
			if (copy_from_user(uptr, (void __user *) arg,
				_IOC_SIZE(cmd))) {
				ret = -EFAULT;
				goto done;
			}
		} else
			memset(uptr, 0, _IOC_SIZE(cmd));
	}

	/* table hit wins; otherwise fall back to the device ioctl hook,
	 * which always runs locked */
	if (nr < ARRAY_SIZE(kgsl_ioctl_funcs) &&
	    kgsl_ioctl_funcs[nr].func != NULL) {
		func = kgsl_ioctl_funcs[nr].func;
		lock = kgsl_ioctl_funcs[nr].lock;
	} else {
		func = dev_priv->device->ftbl->ioctl;
		if (!func) {
			KGSL_DRV_INFO(dev_priv->device,
				      "invalid ioctl code %08x\n", cmd);
			ret = -EINVAL;
			goto done;
		}
		lock = 1;
	}

	if (lock) {
		mutex_lock(&dev_priv->device->mutex);
		kgsl_check_suspended(dev_priv->device);
	}

	ret = func(dev_priv, cmd, uptr);

	if (lock) {
		kgsl_check_idle_locked(dev_priv->device);
		mutex_unlock(&dev_priv->device->mutex);
	}

	/* only copy results back when the handler succeeded */
	if (ret == 0 && (cmd & IOC_OUT)) {
		if (copy_to_user((void __user *) arg, uptr, _IOC_SIZE(cmd)))
			ret = -EFAULT;
	}

done:
	if (_IOC_SIZE(cmd) >= sizeof(ustack))
		kfree(uptr);

	return ret;
}
1962
/*
 * Map the device memstore into userspace.  The mapping is forced
 * read-only and uncached, and must cover exactly the whole memstore.
 * The caller (kgsl_mmap) has already validated that vm_pgoff points at
 * the memstore's physical address.
 */
static int
kgsl_mmap_memstore(struct kgsl_device *device, struct vm_area_struct *vma)
{
	struct kgsl_memdesc *memdesc = &device->memstore;
	int result;
	unsigned int vma_size = vma->vm_end - vma->vm_start;

	/* The memstore can only be mapped as read only */

	if (vma->vm_flags & VM_WRITE)
		return -EPERM;

	if (memdesc->size != vma_size) {
		KGSL_MEM_ERR(device, "memstore bad size: %d should be %d\n",
			     vma_size, memdesc->size);
		return -EINVAL;
	}

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* vm_pgoff already holds the memstore pfn (checked by caller) */
	result = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				 vma_size, vma->vm_page_prot);
	if (result != 0)
		KGSL_MEM_ERR(device, "remap_pfn_range failed: %d\n",
			     result);

	return result;
}
1991
Jordan Crouse4283e172011-09-26 14:45:47 -06001992/*
1993 * kgsl_gpumem_vm_open is called whenever a vma region is copied or split.
1994 * Increase the refcount to make sure that the accounting stays correct
1995 */
1996
1997static void kgsl_gpumem_vm_open(struct vm_area_struct *vma)
1998{
1999 struct kgsl_mem_entry *entry = vma->vm_private_data;
2000 kgsl_mem_entry_get(entry);
2001}
2002
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002003static int
2004kgsl_gpumem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2005{
2006 struct kgsl_mem_entry *entry = vma->vm_private_data;
2007
Jordan Croused17e9aa2011-10-12 16:57:48 -06002008 if (!entry->memdesc.ops || !entry->memdesc.ops->vmfault)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002009 return VM_FAULT_SIGBUS;
2010
2011 return entry->memdesc.ops->vmfault(&entry->memdesc, vma, vmf);
2012}
2013
2014static void
2015kgsl_gpumem_vm_close(struct vm_area_struct *vma)
2016{
2017 struct kgsl_mem_entry *entry = vma->vm_private_data;
2018 kgsl_mem_entry_put(entry);
2019}
2020
/*
 * vm operations for user mappings of GPU memory.  open/close keep the
 * kgsl_mem_entry refcount in sync with the number of vmas referencing it;
 * fault delegates to the memdesc backend.
 */
static struct vm_operations_struct kgsl_gpumem_vm_ops = {
	.open = kgsl_gpumem_vm_open,
	.fault = kgsl_gpumem_vm_fault,
	.close = kgsl_gpumem_vm_close,
};
2026
2027static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
2028{
2029 unsigned long vma_offset = vma->vm_pgoff << PAGE_SHIFT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002030 struct kgsl_device_private *dev_priv = file->private_data;
2031 struct kgsl_process_private *private = dev_priv->process_priv;
Jordan Crouse976cf0e2011-09-12 10:41:49 -06002032 struct kgsl_mem_entry *tmp, *entry = NULL;
Jordan Crouse2db0af92011-08-08 16:05:09 -06002033 struct kgsl_device *device = dev_priv->device;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002034
2035 /* Handle leagacy behavior for memstore */
2036
2037 if (vma_offset == device->memstore.physaddr)
2038 return kgsl_mmap_memstore(device, vma);
2039
2040 /* Find a chunk of GPU memory */
2041
2042 spin_lock(&private->mem_lock);
Jordan Crouse976cf0e2011-09-12 10:41:49 -06002043 list_for_each_entry(tmp, &private->mem_list, list) {
2044 if (vma_offset == tmp->memdesc.gpuaddr) {
2045 kgsl_mem_entry_get(tmp);
2046 entry = tmp;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002047 break;
2048 }
2049 }
2050 spin_unlock(&private->mem_lock);
2051
2052 if (entry == NULL)
2053 return -EINVAL;
2054
Jordan Croused17e9aa2011-10-12 16:57:48 -06002055 if (!entry->memdesc.ops ||
2056 !entry->memdesc.ops->vmflags ||
2057 !entry->memdesc.ops->vmfault)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002058 return -EINVAL;
2059
2060 vma->vm_flags |= entry->memdesc.ops->vmflags(&entry->memdesc);
2061
2062 vma->vm_private_data = entry;
2063 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
2064 vma->vm_ops = &kgsl_gpumem_vm_ops;
2065 vma->vm_file = file;
2066
2067 return 0;
2068}
2069
/* File operations for the /dev/kgsl-* character device nodes */
static const struct file_operations kgsl_fops = {
	.owner = THIS_MODULE,
	.release = kgsl_release,
	.open = kgsl_open,
	.mmap = kgsl_mmap,
	.unlocked_ioctl = kgsl_ioctl,
};
2077
/*
 * Global driver state shared by all KGSL devices.  Only the locks are
 * statically initialized; the remaining fields (class, major, devp[],
 * pagetable/process lists, ...) are filled in by kgsl_core_init() and
 * kgsl_register_device().
 */
struct kgsl_driver kgsl_driver  = {
	.process_mutex = __MUTEX_INITIALIZER(kgsl_driver.process_mutex),
	.ptlock = __SPIN_LOCK_UNLOCKED(kgsl_driver.ptlock),
	.devlock = __MUTEX_INITIALIZER(kgsl_driver.devlock),
};
EXPORT_SYMBOL(kgsl_driver);
2084
/*
 * kgsl_unregister_device - tear down a device previously registered with
 * kgsl_register_device(), releasing resources in roughly the reverse
 * order of registration.  Silently returns if the device is not found
 * in the minor table (never registered or already unregistered).
 */
void kgsl_unregister_device(struct kgsl_device *device)
{
	int minor;

	/* Find the minor this device was registered under */
	mutex_lock(&kgsl_driver.devlock);
	for (minor = 0; minor < KGSL_DEVICE_MAX; minor++) {
		if (device == kgsl_driver.devp[minor])
			break;
	}

	mutex_unlock(&kgsl_driver.devlock);

	if (minor == KGSL_DEVICE_MAX)
		return;

	kgsl_device_snapshot_close(device);

	kgsl_cffdump_close(device->id);
	kgsl_pwrctrl_uninit_sysfs(device);

	/* Wakelock is only created on 8x60 targets (see kgsl_register_device) */
	if (cpu_is_msm8x60())
		wake_lock_destroy(&device->idle_wakelock);

	idr_destroy(&device->context_idr);

	if (device->memstore.hostptr)
		kgsl_sharedmem_free(&device->memstore);

	kgsl_mmu_close(device);

	if (device->work_queue) {
		destroy_workqueue(device->work_queue);
		device->work_queue = NULL;
	}

	device_destroy(kgsl_driver.class,
		       MKDEV(MAJOR(kgsl_driver.major), minor));

	/* Release the minor number for reuse */
	mutex_lock(&kgsl_driver.devlock);
	kgsl_driver.devp[minor] = NULL;
	mutex_unlock(&kgsl_driver.devlock);
}
EXPORT_SYMBOL(kgsl_unregister_device);
2128
2129int
2130kgsl_register_device(struct kgsl_device *device)
2131{
2132 int minor, ret;
2133 dev_t dev;
2134
2135 /* Find a minor for the device */
2136
2137 mutex_lock(&kgsl_driver.devlock);
2138 for (minor = 0; minor < KGSL_DEVICE_MAX; minor++) {
2139 if (kgsl_driver.devp[minor] == NULL) {
2140 kgsl_driver.devp[minor] = device;
2141 break;
2142 }
2143 }
2144
2145 mutex_unlock(&kgsl_driver.devlock);
2146
2147 if (minor == KGSL_DEVICE_MAX) {
2148 KGSL_CORE_ERR("minor devices exhausted\n");
2149 return -ENODEV;
2150 }
2151
2152 /* Create the device */
2153 dev = MKDEV(MAJOR(kgsl_driver.major), minor);
2154 device->dev = device_create(kgsl_driver.class,
2155 device->parentdev,
2156 dev, device,
2157 device->name);
2158
2159 if (IS_ERR(device->dev)) {
2160 ret = PTR_ERR(device->dev);
2161 KGSL_CORE_ERR("device_create(%s): %d\n", device->name, ret);
2162 goto err_devlist;
2163 }
2164
2165 dev_set_drvdata(device->parentdev, device);
2166
2167 /* Generic device initialization */
2168 init_waitqueue_head(&device->wait_queue);
2169
2170 kgsl_cffdump_open(device->id);
2171
2172 init_completion(&device->hwaccess_gate);
2173 init_completion(&device->suspend_gate);
2174
2175 ATOMIC_INIT_NOTIFIER_HEAD(&device->ts_notifier_list);
2176
2177 setup_timer(&device->idle_timer, kgsl_timer, (unsigned long) device);
2178 ret = kgsl_create_device_workqueue(device);
2179 if (ret)
2180 goto err_devlist;
2181
2182 INIT_WORK(&device->idle_check_ws, kgsl_idle_check);
Jordan Crouse1bf80aa2011-10-12 16:57:47 -06002183 INIT_WORK(&device->ts_expired_ws, kgsl_timestamp_expired);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002184
Jordan Croused4bc9d22011-11-17 13:39:21 -07002185 INIT_LIST_HEAD(&device->events);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002186
2187 ret = kgsl_mmu_init(device);
2188 if (ret != 0)
2189 goto err_dest_work_q;
2190
2191 ret = kgsl_allocate_contiguous(&device->memstore,
2192 sizeof(struct kgsl_devmemstore));
2193
2194 if (ret != 0)
2195 goto err_close_mmu;
2196
Lucille Sylvesteref44e7332011-11-02 13:21:17 -07002197 if (cpu_is_msm8x60())
2198 wake_lock_init(&device->idle_wakelock,
2199 WAKE_LOCK_IDLE, device->name);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002200
2201 idr_init(&device->context_idr);
2202
Jordan Crouse156cfbc2012-01-24 09:32:04 -07002203 /* Initalize the snapshot engine */
2204 kgsl_device_snapshot_init(device);
2205
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002206 /* sysfs and debugfs initalization - failure here is non fatal */
2207
2208 /* Initialize logging */
2209 kgsl_device_debugfs_init(device);
2210
2211 /* Initialize common sysfs entries */
2212 kgsl_pwrctrl_init_sysfs(device);
2213
2214 return 0;
2215
2216err_close_mmu:
2217 kgsl_mmu_close(device);
2218err_dest_work_q:
2219 destroy_workqueue(device->work_queue);
2220 device->work_queue = NULL;
2221err_devlist:
2222 mutex_lock(&kgsl_driver.devlock);
2223 kgsl_driver.devp[minor] = NULL;
2224 mutex_unlock(&kgsl_driver.devlock);
2225
2226 return ret;
2227}
2228EXPORT_SYMBOL(kgsl_register_device);
2229
2230int kgsl_device_platform_probe(struct kgsl_device *device,
2231 irqreturn_t (*dev_isr) (int, void*))
2232{
Michael Street8bacdd02012-01-05 14:55:01 -08002233 int result;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002234 int status = -EINVAL;
2235 struct kgsl_memregion *regspace = NULL;
2236 struct resource *res;
2237 struct platform_device *pdev =
2238 container_of(device->parentdev, struct platform_device, dev);
2239
2240 pm_runtime_enable(device->parentdev);
2241
2242 status = kgsl_pwrctrl_init(device);
2243 if (status)
2244 goto error;
2245
2246 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2247 device->iomemname);
2248 if (res == NULL) {
2249 KGSL_DRV_ERR(device, "platform_get_resource_byname failed\n");
2250 status = -EINVAL;
2251 goto error_pwrctrl_close;
2252 }
2253 if (res->start == 0 || resource_size(res) == 0) {
2254 KGSL_DRV_ERR(device, "dev %d invalid regspace\n", device->id);
2255 status = -EINVAL;
2256 goto error_pwrctrl_close;
2257 }
2258
2259 regspace = &device->regspace;
2260 regspace->mmio_phys_base = res->start;
2261 regspace->sizebytes = resource_size(res);
2262
2263 if (!request_mem_region(regspace->mmio_phys_base,
2264 regspace->sizebytes, device->name)) {
2265 KGSL_DRV_ERR(device, "request_mem_region failed\n");
2266 status = -ENODEV;
2267 goto error_pwrctrl_close;
2268 }
2269
2270 regspace->mmio_virt_base = ioremap(regspace->mmio_phys_base,
2271 regspace->sizebytes);
2272
2273 if (regspace->mmio_virt_base == NULL) {
2274 KGSL_DRV_ERR(device, "ioremap failed\n");
2275 status = -ENODEV;
2276 goto error_release_mem;
2277 }
2278
2279 status = request_irq(device->pwrctrl.interrupt_num, dev_isr,
2280 IRQF_TRIGGER_HIGH, device->name, device);
2281 if (status) {
2282 KGSL_DRV_ERR(device, "request_irq(%d) failed: %d\n",
2283 device->pwrctrl.interrupt_num, status);
2284 goto error_iounmap;
2285 }
2286 device->pwrctrl.have_irq = 1;
2287 disable_irq(device->pwrctrl.interrupt_num);
2288
2289 KGSL_DRV_INFO(device,
2290 "dev_id %d regs phys 0x%08x size 0x%08x virt %p\n",
2291 device->id, regspace->mmio_phys_base,
2292 regspace->sizebytes, regspace->mmio_virt_base);
2293
Michael Street8bacdd02012-01-05 14:55:01 -08002294 result = kgsl_drm_init(pdev);
2295 if (result)
2296 goto error_iounmap;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002297
2298 status = kgsl_register_device(device);
2299 if (!status)
2300 return status;
2301
2302 free_irq(device->pwrctrl.interrupt_num, NULL);
2303 device->pwrctrl.have_irq = 0;
2304error_iounmap:
2305 iounmap(regspace->mmio_virt_base);
2306 regspace->mmio_virt_base = NULL;
2307error_release_mem:
2308 release_mem_region(regspace->mmio_phys_base, regspace->sizebytes);
2309error_pwrctrl_close:
2310 kgsl_pwrctrl_close(device);
2311error:
2312 return status;
2313}
2314EXPORT_SYMBOL(kgsl_device_platform_probe);
2315
2316void kgsl_device_platform_remove(struct kgsl_device *device)
2317{
2318 struct kgsl_memregion *regspace = &device->regspace;
2319
2320 kgsl_unregister_device(device);
2321
2322 if (regspace->mmio_virt_base != NULL) {
2323 iounmap(regspace->mmio_virt_base);
2324 regspace->mmio_virt_base = NULL;
2325 release_mem_region(regspace->mmio_phys_base,
2326 regspace->sizebytes);
2327 }
2328 kgsl_pwrctrl_close(device);
2329
2330 pm_runtime_disable(device->parentdev);
2331}
2332EXPORT_SYMBOL(kgsl_device_platform_remove);
2333
2334static int __devinit
2335kgsl_ptdata_init(void)
2336{
Shubhraprakash Das767fdda2011-08-15 15:49:45 -06002337 kgsl_driver.ptpool = kgsl_mmu_ptpool_init(KGSL_PAGETABLE_SIZE,
2338 kgsl_pagetable_count);
2339 if (!kgsl_driver.ptpool)
2340 return -ENOMEM;
2341 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002342}
2343
/*
 * Module-level teardown.  Also invoked from the kgsl_core_init() error
 * path, so some of these calls may run against objects that were never
 * fully set up (the class pointer is checked for exactly that reason).
 */
static void kgsl_core_exit(void)
{
	unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX);

	/*
	 * NOTE(review): the cdev added in kgsl_core_init() is never
	 * cdev_del()'d here -- confirm whether that is intentional.
	 */
	kgsl_mmu_ptpool_destroy(&kgsl_driver.ptpool);
	kgsl_driver.ptpool = NULL;

	device_unregister(&kgsl_driver.virtdev);

	if (kgsl_driver.class) {
		class_destroy(kgsl_driver.class);
		kgsl_driver.class = NULL;
	}

	kgsl_drm_exit();
	kgsl_cffdump_destroy();
	kgsl_core_debugfs_close();
	kgsl_sharedmem_uninit_sysfs();
}
2363
/*
 * Module-level initialization: allocate the chrdev region, register the
 * cdev, create the kgsl class and a virtual device for core sysfs
 * entries, bring up debugfs/sysfs/cffdump, and (for GPU-MMU targets)
 * create the pagetable pool.
 *
 * On any failure all partial state is unwound through kgsl_core_exit().
 * NOTE(review): kgsl_core_exit() unconditionally calls device_unregister
 * on virtdev even when device_register() above never succeeded -- verify
 * this is safe on the early error paths.
 */
static int __init kgsl_core_init(void)
{
	int result = 0;
	/* alloc major and minor device numbers */
	result = alloc_chrdev_region(&kgsl_driver.major, 0, KGSL_DEVICE_MAX,
				     KGSL_NAME);
	if (result < 0) {
		KGSL_CORE_ERR("alloc_chrdev_region failed err = %d\n", result);
		goto err;
	}

	cdev_init(&kgsl_driver.cdev, &kgsl_fops);
	kgsl_driver.cdev.owner = THIS_MODULE;
	kgsl_driver.cdev.ops = &kgsl_fops;
	result = cdev_add(&kgsl_driver.cdev, MKDEV(MAJOR(kgsl_driver.major), 0),
			  KGSL_DEVICE_MAX);

	if (result) {
		KGSL_CORE_ERR("kgsl: cdev_add() failed, dev_num= %d,"
			      " result= %d\n", kgsl_driver.major, result);
		goto err;
	}

	kgsl_driver.class = class_create(THIS_MODULE, KGSL_NAME);

	if (IS_ERR(kgsl_driver.class)) {
		result = PTR_ERR(kgsl_driver.class);
		KGSL_CORE_ERR("failed to create class %s", KGSL_NAME);
		goto err;
	}

	/* Make a virtual device for managing core related things
	   in sysfs */
	kgsl_driver.virtdev.class = kgsl_driver.class;
	dev_set_name(&kgsl_driver.virtdev, "kgsl");
	result = device_register(&kgsl_driver.virtdev);
	if (result) {
		KGSL_CORE_ERR("driver_register failed\n");
		goto err;
	}

	/* Make kobjects in the virtual device for storing statistics */

	/* NOTE(review): kobject_create_and_add() can return NULL; the
	   results are not checked here -- confirm downstream users cope */
	kgsl_driver.ptkobj =
	  kobject_create_and_add("pagetables",
				 &kgsl_driver.virtdev.kobj);

	kgsl_driver.prockobj =
		kobject_create_and_add("proc",
				       &kgsl_driver.virtdev.kobj);

	kgsl_core_debugfs_init();

	kgsl_sharedmem_init_sysfs();
	kgsl_cffdump_init();

	INIT_LIST_HEAD(&kgsl_driver.process_list);

	INIT_LIST_HEAD(&kgsl_driver.pagetable_list);

	kgsl_mmu_set_mmutype(ksgl_mmu_type);

	/* The pagetable pool is only needed when the GPU has its own MMU */
	if (KGSL_MMU_TYPE_GPU == kgsl_mmu_get_mmutype()) {
		result = kgsl_ptdata_init();
		if (result)
			goto err;
	}

	return 0;

err:
	kgsl_core_exit();
	return result;
}
2438
2439module_init(kgsl_core_init);
2440module_exit(kgsl_core_exit);
2441
2442MODULE_AUTHOR("Qualcomm Innovation Center, Inc.");
2443MODULE_DESCRIPTION("MSM GPU driver");
2444MODULE_LICENSE("GPL");