blob: 53b9e8a99f0e48ad9ee9280f06c991ad85c07a90 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/fb.h>
14#include <linux/file.h>
15#include <linux/fs.h>
16#include <linux/debugfs.h>
17#include <linux/uaccess.h>
18#include <linux/interrupt.h>
19#include <linux/workqueue.h>
20#include <linux/android_pmem.h>
21#include <linux/vmalloc.h>
22#include <linux/pm_runtime.h>
Jordan Croused4bc9d22011-11-17 13:39:21 -070023#include <linux/genlock.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070024
25#include <linux/ashmem.h>
26#include <linux/major.h>
Jordan Crouse8eab35a2011-10-12 16:57:48 -060027#include <linux/ion.h>
Lucille Sylvesteref44e7332011-11-02 13:21:17 -070028#include <mach/socinfo.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029
30#include "kgsl.h"
31#include "kgsl_debugfs.h"
32#include "kgsl_cffdump.h"
33#include "kgsl_log.h"
34#include "kgsl_sharedmem.h"
35#include "kgsl_device.h"
36
37#undef MODULE_PARAM_PREFIX
38#define MODULE_PARAM_PREFIX "kgsl."
39
/* Minimum number of pagetables to pre-allocate; tunable via "kgsl.ptcount" */
static int kgsl_pagetable_count = KGSL_PAGETABLE_COUNT;
/* MMU backend selection ("iommu", "gpummu" or "nommu"); the historical
 * "ksgl" spelling of the identifier is kept as-is since it may be
 * referenced elsewhere in this file. */
static char *ksgl_mmu_type;
module_param_named(ptcount, kgsl_pagetable_count, int, 0);
MODULE_PARM_DESC(kgsl_pagetable_count,
"Minimum number of pagetables for KGSL to allocate at initialization time");
module_param_named(mmutype, ksgl_mmu_type, charp, 0);
MODULE_PARM_DESC(ksgl_mmu_type,
"Type of MMU to be used for graphics. Valid values are 'iommu' or 'gpummu' or 'nommu'");

/* Shared ION client handle used when importing/freeing ION buffers */
static struct ion_client *kgsl_ion_client;
50
#ifdef CONFIG_GENLOCK

/**
 * kgsl_add_event - Add a new timestamp event for the KGSL device
 * @device - KGSL device for the new event
 * @ts - the timestamp to trigger the event on
 * @cb - callback function to call when the timestamp expires
 * @priv - private data for the specific event type
 *
 * @returns - 0 on success or error code on failure
 *
 * The event list is kept sorted by timestamp.  If @ts has already been
 * retired the callback is invoked synchronously and nothing is queued.
 * Caller is expected to hold the device mutex (list is not locked here).
 */

static int kgsl_add_event(struct kgsl_device *device, u32 ts,
	void (*cb)(struct kgsl_device *, void *, u32), void *priv)
{
	struct kgsl_event *event;
	struct list_head *n;
	unsigned int cur = device->ftbl->readtimestamp(device,
		KGSL_TIMESTAMP_RETIRED);

	if (cb == NULL)
		return -EINVAL;

	/* Check to see if the requested timestamp has already fired */

	if (timestamp_cmp(cur, ts) >= 0) {
		cb(device, priv, cur);
		return 0;
	}

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (event == NULL)
		return -ENOMEM;

	event->timestamp = ts;
	event->priv = priv;
	event->func = cb;

	/* Add the event in order to the list */

	for (n = device->events.next ; n != &device->events; n = n->next) {
		struct kgsl_event *e =
			list_entry(n, struct kgsl_event, list);

		/* Insert before the first event with a later timestamp */
		if (timestamp_cmp(e->timestamp, ts) > 0) {
			list_add(&event->list, n->prev);
			break;
		}
	}

	/* Loop ran to completion: this is the latest timestamp so far */
	if (n == &device->events)
		list_add_tail(&event->list, &device->events);

	return 0;
}
#endif
107
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700108static inline struct kgsl_mem_entry *
109kgsl_mem_entry_create(void)
110{
111 struct kgsl_mem_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
112
113 if (!entry)
114 KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*entry));
115 else
116 kref_init(&entry->refcount);
117
118 return entry;
119}
120
121void
122kgsl_mem_entry_destroy(struct kref *kref)
123{
124 struct kgsl_mem_entry *entry = container_of(kref,
125 struct kgsl_mem_entry,
126 refcount);
Jordan Crouse1b897cf2011-10-12 16:57:48 -0600127
128 entry->priv->stats[entry->memtype].cur -= entry->memdesc.size;
129
130 if (entry->memtype != KGSL_MEM_ENTRY_KERNEL)
131 kgsl_driver.stats.mapped -= entry->memdesc.size;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700132
Jordan Crouse8eab35a2011-10-12 16:57:48 -0600133 /*
134 * Ion takes care of freeing the sglist for us (how nice </sarcasm>) so
135 * unmap the dma before freeing the sharedmem so kgsl_sharedmem_free
136 * doesn't try to free it again
137 */
138
139 if (entry->memtype == KGSL_MEM_ENTRY_ION) {
140 ion_unmap_dma(kgsl_ion_client, entry->priv_data);
141 entry->memdesc.sg = NULL;
142 }
143
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700144 kgsl_sharedmem_free(&entry->memdesc);
145
Jordan Crouse1b897cf2011-10-12 16:57:48 -0600146 switch (entry->memtype) {
147 case KGSL_MEM_ENTRY_PMEM:
148 case KGSL_MEM_ENTRY_ASHMEM:
149 if (entry->priv_data)
150 fput(entry->priv_data);
151 break;
Jordan Crouse8eab35a2011-10-12 16:57:48 -0600152 case KGSL_MEM_ENTRY_ION:
153 ion_free(kgsl_ion_client, entry->priv_data);
154 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700155 }
156
157 kfree(entry);
158}
159EXPORT_SYMBOL(kgsl_mem_entry_destroy);
160
161static
162void kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry,
163 struct kgsl_process_private *process)
164{
165 spin_lock(&process->mem_lock);
166 list_add(&entry->list, &process->mem_list);
167 spin_unlock(&process->mem_lock);
168
169 entry->priv = process;
170}
171
172/* Allocate a new context id */
173
174static struct kgsl_context *
175kgsl_create_context(struct kgsl_device_private *dev_priv)
176{
177 struct kgsl_context *context;
178 int ret, id;
179
180 context = kzalloc(sizeof(*context), GFP_KERNEL);
181
182 if (context == NULL)
183 return NULL;
184
185 while (1) {
186 if (idr_pre_get(&dev_priv->device->context_idr,
187 GFP_KERNEL) == 0) {
188 kfree(context);
189 return NULL;
190 }
191
192 ret = idr_get_new(&dev_priv->device->context_idr,
193 context, &id);
194
195 if (ret != -EAGAIN)
196 break;
197 }
198
199 if (ret) {
200 kfree(context);
201 return NULL;
202 }
203
204 context->id = id;
205 context->dev_priv = dev_priv;
206
207 return context;
208}
209
210static void
211kgsl_destroy_context(struct kgsl_device_private *dev_priv,
212 struct kgsl_context *context)
213{
214 int id;
215
216 if (context == NULL)
217 return;
218
219 /* Fire a bug if the devctxt hasn't been freed */
220 BUG_ON(context->devctxt);
221
222 id = context->id;
223 kfree(context);
224
225 idr_remove(&dev_priv->device->context_idr, id);
226}
227
/* to be called when a process is destroyed, this walks the memqueue and
 * frees any entries that belong to the dying process
 */
static void kgsl_memqueue_cleanup(struct kgsl_device *device,
				     struct kgsl_process_private *private)
{
	struct kgsl_mem_entry *entry, *entry_tmp;

	if (!private)
		return;

	BUG_ON(!mutex_is_locked(&device->mutex));

	/* Drop the queue's reference on every entry owned by @private */
	list_for_each_entry_safe(entry, entry_tmp, &device->memqueue, list) {
		if (entry->priv == private) {
			list_del(&entry->list);
			kgsl_mem_entry_put(entry);
		}
	}
}
248
/*
 * Queue @entry to be freed once @timestamp retires (processed later by
 * the memqueue scan).  Caller must hold device->mutex.
 * NOTE(review): @type is unused here -- presumably kept for symmetry
 * with the ioctl parameters; confirm before removing.
 */
static void kgsl_memqueue_freememontimestamp(struct kgsl_device *device,
				  struct kgsl_mem_entry *entry,
				  uint32_t timestamp,
				  enum kgsl_timestamp_type type)
{
	BUG_ON(!mutex_is_locked(&device->mutex));

	entry->free_timestamp = timestamp;

	list_add_tail(&entry->list, &device->memqueue);
}
260
/*
 * kgsl_timestamp_expired - workqueue handler run when timestamps retire
 *
 * Reads the current retired (EOP) timestamp and, under the device
 * mutex, releases every queued freememontimestamp entry and fires every
 * registered event whose timestamp has been reached.  The events list
 * is sorted by kgsl_add_event; the memqueue is appended in submission
 * order, which is assumed monotonic -- each scan stops at the first
 * entry that is still pending.
 */
static void kgsl_timestamp_expired(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
		ts_expired_ws);
	struct kgsl_mem_entry *entry, *entry_tmp;
	struct kgsl_event *event, *event_tmp;
	uint32_t ts_processed;

	mutex_lock(&device->mutex);

	/* get current EOP timestamp */
	ts_processed = device->ftbl->readtimestamp(device,
		KGSL_TIMESTAMP_RETIRED);

	/* Flush the freememontimestamp queue */
	list_for_each_entry_safe(entry, entry_tmp, &device->memqueue, list) {
		if (timestamp_cmp(ts_processed, entry->free_timestamp) < 0)
			break;

		list_del(&entry->list);
		kgsl_mem_entry_put(entry);
	}

	/* Process expired events */
	list_for_each_entry_safe(event, event_tmp, &device->events, list) {
		if (timestamp_cmp(ts_processed, event->timestamp) < 0)
			break;

		if (event->func)
			event->func(device, event->priv, ts_processed);

		list_del(&event->list);
		kfree(event);
	}

	mutex_unlock(&device->mutex);
}
298
/*
 * kgsl_check_idle_locked - request a nap if the device is idle
 *
 * Caller must hold device->mutex.  If napping is allowed and no other
 * state transition is pending, request KGSL_STATE_NAP; if the sleep
 * attempt fails, re-arm the idle timer to try again later.
 * NOTE(review): "== true" is fragile if nap_allowed can hold values
 * other than 0/1 -- confirm it is strictly boolean before simplifying.
 */
static void kgsl_check_idle_locked(struct kgsl_device *device)
{
	if (device->pwrctrl.nap_allowed == true &&
	    device->state == KGSL_STATE_ACTIVE &&
		device->requested_state == KGSL_STATE_NONE) {
		device->requested_state = KGSL_STATE_NAP;
		if (kgsl_pwrctrl_sleep(device) != 0)
			mod_timer(&device->idle_timer,
				  jiffies +
				  device->pwrctrl.interval_timeout);
	}
}
311
/* Locking wrapper around kgsl_check_idle_locked() */
static void kgsl_check_idle(struct kgsl_device *device)
{
	mutex_lock(&device->mutex);
	kgsl_check_idle_locked(device);
	mutex_unlock(&device->mutex);
}
318
319struct kgsl_device *kgsl_get_device(int dev_idx)
320{
321 int i;
322 struct kgsl_device *ret = NULL;
323
324 mutex_lock(&kgsl_driver.devlock);
325
326 for (i = 0; i < KGSL_DEVICE_MAX; i++) {
327 if (kgsl_driver.devp[i] && kgsl_driver.devp[i]->id == dev_idx) {
328 ret = kgsl_driver.devp[i];
329 break;
330 }
331 }
332
333 mutex_unlock(&kgsl_driver.devlock);
334 return ret;
335}
336EXPORT_SYMBOL(kgsl_get_device);
337
338static struct kgsl_device *kgsl_get_minor(int minor)
339{
340 struct kgsl_device *ret = NULL;
341
342 if (minor < 0 || minor >= KGSL_DEVICE_MAX)
343 return NULL;
344
345 mutex_lock(&kgsl_driver.devlock);
346 ret = kgsl_driver.devp[minor];
347 mutex_unlock(&kgsl_driver.devlock);
348
349 return ret;
350}
351
/* Register @nb on the device's timestamp-retired notifier chain */
int kgsl_register_ts_notifier(struct kgsl_device *device,
			      struct notifier_block *nb)
{
	BUG_ON(device == NULL);
	return atomic_notifier_chain_register(&device->ts_notifier_list,
					      nb);
}
EXPORT_SYMBOL(kgsl_register_ts_notifier);
360
/* Remove @nb from the device's timestamp-retired notifier chain */
int kgsl_unregister_ts_notifier(struct kgsl_device *device,
				struct notifier_block *nb)
{
	BUG_ON(device == NULL);
	return atomic_notifier_chain_unregister(&device->ts_notifier_list,
						nb);
}
EXPORT_SYMBOL(kgsl_unregister_ts_notifier);
369
370int kgsl_check_timestamp(struct kgsl_device *device, unsigned int timestamp)
371{
372 unsigned int ts_processed;
373
374 ts_processed = device->ftbl->readtimestamp(device,
375 KGSL_TIMESTAMP_RETIRED);
376
Jordan Crousee6239dd2011-11-17 13:39:21 -0700377 return (timestamp_cmp(ts_processed, timestamp) >= 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700378}
379EXPORT_SYMBOL(kgsl_check_timestamp);
380
/*
 * kgsl_suspend_device - common suspend path for system/driver suspend
 *
 * Temporarily disables napping and the pwrscale policy, waits for any
 * active users to drain through the suspend gate, then idles and stops
 * the hardware.  Returns 0 on success or -EINVAL if the device is in a
 * state that cannot be suspended.
 */
static int kgsl_suspend_device(struct kgsl_device *device, pm_message_t state)
{
	int status = -EINVAL;
	unsigned int nap_allowed_saved;
	struct kgsl_pwrscale_policy *policy_saved;

	if (!device)
		return -EINVAL;

	KGSL_PWR_WARN(device, "suspend start\n");

	mutex_lock(&device->mutex);
	nap_allowed_saved = device->pwrctrl.nap_allowed;
	device->pwrctrl.nap_allowed = false;
	policy_saved = device->pwrscale.policy;
	device->pwrscale.policy = NULL;
	device->requested_state = KGSL_STATE_SUSPEND;
	/* Make sure no user process is waiting for a timestamp *
	 * before suspending */
	if (device->active_cnt != 0) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->suspend_gate);
		mutex_lock(&device->mutex);
	}
	/* Don't let the timer wake us during suspended sleep. */
	del_timer_sync(&device->idle_timer);
	switch (device->state) {
	case KGSL_STATE_INIT:
		break;
	case KGSL_STATE_ACTIVE:
		/* Wait for the device to become idle */
		device->ftbl->idle(device, KGSL_TIMEOUT_DEFAULT);
		/* fallthrough: an idled device suspends like a napping one */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
		/* Get the completion ready to be waited upon. */
		INIT_COMPLETION(device->hwaccess_gate);
		device->ftbl->suspend_context(device);
		device->ftbl->stop(device);
		device->state = KGSL_STATE_SUSPEND;
		KGSL_PWR_WARN(device, "state -> SUSPEND, device %d\n",
				device->id);
		break;
	case KGSL_STATE_SLUMBER:
		/* Hardware is already down; just gate further access */
		INIT_COMPLETION(device->hwaccess_gate);
		device->state = KGSL_STATE_SUSPEND;
		KGSL_PWR_WARN(device, "state -> SUSPEND, device %d\n",
			device->id);
		break;
	default:
		KGSL_PWR_ERR(device, "suspend fail, device %d\n",
				device->id);
		goto end;
	}
	device->requested_state = KGSL_STATE_NONE;
	device->pwrctrl.nap_allowed = nap_allowed_saved;
	device->pwrscale.policy = policy_saved;
	status = 0;

end:
	mutex_unlock(&device->mutex);
	KGSL_PWR_WARN(device, "suspend end\n");
	return status;
}
444
/*
 * kgsl_resume_device - common resume path
 *
 * Moves a suspended device to SLUMBER (hardware stays off until the
 * next wake) and releases waiters on the hwaccess gate.  Returns 0 on
 * success or -EINVAL if the device was not in the SUSPEND state.
 */
static int kgsl_resume_device(struct kgsl_device *device)
{
	int status = -EINVAL;

	if (!device)
		return -EINVAL;

	KGSL_PWR_WARN(device, "resume start\n");
	mutex_lock(&device->mutex);
	if (device->state == KGSL_STATE_SUSPEND) {
		device->state = KGSL_STATE_SLUMBER;
		status = 0;
		KGSL_PWR_WARN(device,
				"state -> SLUMBER, device %d\n",
				device->id);
		complete_all(&device->hwaccess_gate);
	}
	device->requested_state = KGSL_STATE_NONE;

	mutex_unlock(&device->mutex);
	KGSL_PWR_WARN(device, "resume end\n");
	return status;
}
468
469static int kgsl_suspend(struct device *dev)
470{
471
472 pm_message_t arg = {0};
473 struct kgsl_device *device = dev_get_drvdata(dev);
474 return kgsl_suspend_device(device, arg);
475}
476
/* dev_pm_ops .resume hook - forward to the common resume path */
static int kgsl_resume(struct device *dev)
{
	return kgsl_resume_device(dev_get_drvdata(dev));
}
482
/* Runtime-PM suspend hook: intentionally a no-op */
static int kgsl_runtime_suspend(struct device *dev)
{
	return 0;
}
487
/* Runtime-PM resume hook: intentionally a no-op */
static int kgsl_runtime_resume(struct device *dev)
{
	return 0;
}
492
/* System and runtime PM callbacks exported for the platform driver */
const struct dev_pm_ops kgsl_pm_ops = {
	.suspend = kgsl_suspend,
	.resume = kgsl_resume,
	.runtime_suspend = kgsl_runtime_suspend,
	.runtime_resume = kgsl_runtime_resume,
};
EXPORT_SYMBOL(kgsl_pm_ops);
500
/*
 * Early-suspend hook: push the device toward SLUMBER when the display
 * turns off.
 */
void kgsl_early_suspend_driver(struct early_suspend *h)
{
	struct kgsl_device *device = container_of(h,
					struct kgsl_device, display_off);
	KGSL_PWR_WARN(device, "early suspend start\n");
	mutex_lock(&device->mutex);
	device->requested_state = KGSL_STATE_SLUMBER;
	kgsl_pwrctrl_sleep(device);
	mutex_unlock(&device->mutex);
	KGSL_PWR_WARN(device, "early suspend end\n");
}
EXPORT_SYMBOL(kgsl_early_suspend_driver);
513
/* Platform-driver suspend entry point */
int kgsl_suspend_driver(struct platform_device *pdev,
			pm_message_t state)
{
	struct kgsl_device *device = dev_get_drvdata(&pdev->dev);
	return kgsl_suspend_device(device, state);
}
EXPORT_SYMBOL(kgsl_suspend_driver);
521
/* Platform-driver resume entry point */
int kgsl_resume_driver(struct platform_device *pdev)
{
	struct kgsl_device *device = dev_get_drvdata(&pdev->dev);
	return kgsl_resume_device(device);
}
EXPORT_SYMBOL(kgsl_resume_driver);
528
/*
 * Late-resume hook: wake the device when the display turns back on and
 * clear the slumber-restore flag so it stays awake.
 */
void kgsl_late_resume_driver(struct early_suspend *h)
{
	struct kgsl_device *device = container_of(h,
					struct kgsl_device, display_off);
	KGSL_PWR_WARN(device, "late resume start\n");
	mutex_lock(&device->mutex);
	kgsl_pwrctrl_wake(device);
	device->pwrctrl.restore_slumber = 0;
	mutex_unlock(&device->mutex);
	kgsl_check_idle(device);
	KGSL_PWR_WARN(device, "late resume end\n");
}
EXPORT_SYMBOL(kgsl_late_resume_driver);
542
543/* file operations */
544static struct kgsl_process_private *
545kgsl_get_process_private(struct kgsl_device_private *cur_dev_priv)
546{
547 struct kgsl_process_private *private;
548
549 mutex_lock(&kgsl_driver.process_mutex);
550 list_for_each_entry(private, &kgsl_driver.process_list, list) {
551 if (private->pid == task_tgid_nr(current)) {
552 private->refcnt++;
553 goto out;
554 }
555 }
556
557 /* no existing process private found for this dev_priv, create one */
558 private = kzalloc(sizeof(struct kgsl_process_private), GFP_KERNEL);
559 if (private == NULL) {
560 KGSL_DRV_ERR(cur_dev_priv->device, "kzalloc(%d) failed\n",
561 sizeof(struct kgsl_process_private));
562 goto out;
563 }
564
565 spin_lock_init(&private->mem_lock);
566 private->refcnt = 1;
567 private->pid = task_tgid_nr(current);
568
569 INIT_LIST_HEAD(&private->mem_list);
570
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600571 if (kgsl_mmu_enabled())
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700572 {
573 unsigned long pt_name;
574
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700575 pt_name = task_tgid_nr(current);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700576 private->pagetable = kgsl_mmu_getpagetable(pt_name);
577 if (private->pagetable == NULL) {
578 kfree(private);
579 private = NULL;
580 goto out;
581 }
582 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700583
584 list_add(&private->list, &kgsl_driver.process_list);
585
586 kgsl_process_init_sysfs(private);
587
588out:
589 mutex_unlock(&kgsl_driver.process_mutex);
590 return private;
591}
592
/*
 * kgsl_put_process_private - drop a reference on the per-process data
 *
 * On the final put the process is removed from the global list, all of
 * its remaining memory entries are released, its pagetable is returned
 * to the MMU layer and the structure is freed.
 */
static void
kgsl_put_process_private(struct kgsl_device *device,
			 struct kgsl_process_private *private)
{
	struct kgsl_mem_entry *entry = NULL;
	struct kgsl_mem_entry *entry_tmp = NULL;

	if (!private)
		return;

	mutex_lock(&kgsl_driver.process_mutex);

	if (--private->refcnt)
		goto unlock;

	kgsl_process_uninit_sysfs(private);

	list_del(&private->list);

	/* Release every memory entry still owned by the process */
	list_for_each_entry_safe(entry, entry_tmp, &private->mem_list, list) {
		list_del(&entry->list);
		kgsl_mem_entry_put(entry);
	}

	kgsl_mmu_putpagetable(private->pagetable);
	kfree(private);
unlock:
	mutex_unlock(&kgsl_driver.process_mutex);
}
622
/*
 * kgsl_release - release() handler for the KGSL character device
 *
 * Destroys every context created through this fd, stops the hardware on
 * the last close, drains pending freememontimestamp entries belonging
 * to the process, and drops the per-fd and per-process references along
 * with the runtime-PM reference taken in kgsl_open().
 */
static int kgsl_release(struct inode *inodep, struct file *filep)
{
	int result = 0;
	struct kgsl_device_private *dev_priv = filep->private_data;
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_device *device = dev_priv->device;
	struct kgsl_context *context;
	int next = 0;

	filep->private_data = NULL;

	mutex_lock(&device->mutex);
	kgsl_check_suspended(device);

	/* Walk the idr and tear down every context owned by this fd */
	while (1) {
		context = idr_get_next(&device->context_idr, &next);
		if (context == NULL)
			break;

		if (context->dev_priv == dev_priv) {
			device->ftbl->drawctxt_destroy(device, context);
			kgsl_destroy_context(dev_priv, context);
		}

		next = next + 1;
	}

	device->open_count--;
	if (device->open_count == 0) {
		result = device->ftbl->stop(device);
		device->state = KGSL_STATE_INIT;
		KGSL_PWR_WARN(device, "state -> INIT, device %d\n", device->id);
	}
	/* clean up any to-be-freed entries that belong to this
	 * process and this device
	 */
	kgsl_memqueue_cleanup(device, private);

	mutex_unlock(&device->mutex);
	kfree(dev_priv);

	kgsl_put_process_private(device, private);

	pm_runtime_put(device->parentdev);
	return result;
}
669
670static int kgsl_open(struct inode *inodep, struct file *filep)
671{
672 int result;
673 struct kgsl_device_private *dev_priv;
674 struct kgsl_device *device;
675 unsigned int minor = iminor(inodep);
676
677 device = kgsl_get_minor(minor);
678 BUG_ON(device == NULL);
679
680 if (filep->f_flags & O_EXCL) {
681 KGSL_DRV_ERR(device, "O_EXCL not allowed\n");
682 return -EBUSY;
683 }
684
685 result = pm_runtime_get_sync(device->parentdev);
686 if (result < 0) {
687 KGSL_DRV_ERR(device,
688 "Runtime PM: Unable to wake up the device, rc = %d\n",
689 result);
690 return result;
691 }
692 result = 0;
693
694 dev_priv = kzalloc(sizeof(struct kgsl_device_private), GFP_KERNEL);
695 if (dev_priv == NULL) {
696 KGSL_DRV_ERR(device, "kzalloc failed(%d)\n",
697 sizeof(struct kgsl_device_private));
698 result = -ENOMEM;
699 goto err_pmruntime;
700 }
701
702 dev_priv->device = device;
703 filep->private_data = dev_priv;
704
705 /* Get file (per process) private struct */
706 dev_priv->process_priv = kgsl_get_process_private(dev_priv);
707 if (dev_priv->process_priv == NULL) {
708 result = -ENOMEM;
709 goto err_freedevpriv;
710 }
711
712 mutex_lock(&device->mutex);
713 kgsl_check_suspended(device);
714
715 if (device->open_count == 0) {
716 result = device->ftbl->start(device, true);
717
718 if (result) {
719 mutex_unlock(&device->mutex);
720 goto err_putprocess;
721 }
722 device->state = KGSL_STATE_ACTIVE;
723 KGSL_PWR_WARN(device,
724 "state -> ACTIVE, device %d\n", minor);
725 }
726 device->open_count++;
727 mutex_unlock(&device->mutex);
728
729 KGSL_DRV_INFO(device, "Initialized %s: mmu=%s pagetable_count=%d\n",
730 device->name, kgsl_mmu_enabled() ? "on" : "off",
731 kgsl_pagetable_count);
732
733 return result;
734
735err_putprocess:
736 kgsl_put_process_private(device, dev_priv->process_priv);
737err_freedevpriv:
738 filep->private_data = NULL;
739 kfree(dev_priv);
740err_pmruntime:
741 pm_runtime_put(device->parentdev);
742 return result;
743}
744
745
746/*call with private->mem_lock locked */
747static struct kgsl_mem_entry *
748kgsl_sharedmem_find(struct kgsl_process_private *private, unsigned int gpuaddr)
749{
750 struct kgsl_mem_entry *entry = NULL, *result = NULL;
751
752 BUG_ON(private == NULL);
753
754 gpuaddr &= PAGE_MASK;
755
756 list_for_each_entry(entry, &private->mem_list, list) {
757 if (entry->memdesc.gpuaddr == gpuaddr) {
758 result = entry;
759 break;
760 }
761 }
762 return result;
763}
764
/*call with private->mem_lock locked */
/*
 * Find the mem entry that fully contains [gpuaddr, gpuaddr + size).
 * NOTE(review): "gpuaddr + size" is not checked for unsigned
 * wrap-around; confirm callers validate sizes from userspace.
 */
struct kgsl_mem_entry *
kgsl_sharedmem_find_region(struct kgsl_process_private *private,
				unsigned int gpuaddr,
				size_t size)
{
	struct kgsl_mem_entry *entry = NULL, *result = NULL;

	BUG_ON(private == NULL);

	list_for_each_entry(entry, &private->mem_list, list) {
		if (gpuaddr >= entry->memdesc.gpuaddr &&
		    ((gpuaddr + size) <=
			(entry->memdesc.gpuaddr + entry->memdesc.size))) {
			result = entry;
			break;
		}
	}

	return result;
}
EXPORT_SYMBOL(kgsl_sharedmem_find_region);
787
788uint8_t *kgsl_gpuaddr_to_vaddr(const struct kgsl_memdesc *memdesc,
789 unsigned int gpuaddr, unsigned int *size)
790{
791 BUG_ON(memdesc->hostptr == NULL);
792
793 if (memdesc->gpuaddr == 0 || (gpuaddr < memdesc->gpuaddr ||
794 gpuaddr >= memdesc->gpuaddr + memdesc->size))
795 return NULL;
796
797 *size = memdesc->size - (gpuaddr - memdesc->gpuaddr);
798 return memdesc->hostptr + (gpuaddr - memdesc->gpuaddr);
799}
800EXPORT_SYMBOL(kgsl_gpuaddr_to_vaddr);
801
/*call all ioctl sub functions with driver locked*/
/*
 * IOCTL handler: KGSL_PROP_VERSION is answered here; every other
 * property query is delegated to the device-specific getproperty().
 */
static long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_device_getproperty *param = data;

	switch (param->type) {
	case KGSL_PROP_VERSION:
	{
		struct kgsl_version version;
		/* Reject mismatched struct sizes from older/newer userspace */
		if (param->sizebytes != sizeof(version)) {
			result = -EINVAL;
			break;
		}

		version.drv_major = KGSL_VERSION_MAJOR;
		version.drv_minor = KGSL_VERSION_MINOR;
		version.dev_major = dev_priv->device->ver_major;
		version.dev_minor = dev_priv->device->ver_minor;

		if (copy_to_user(param->value, &version, sizeof(version)))
			result = -EFAULT;

		break;
	}
	default:
		result = dev_priv->device->ftbl->getproperty(
			dev_priv->device, param->type,
			param->value, param->sizebytes);
	}


	return result;
}
837
/*
 * IOCTL handler: block until the given timestamp retires or the timeout
 * expires.  active_cnt is raised for the duration so a concurrent
 * suspend waits for this ioctl instead of stopping the hardware
 * mid-wait.
 */
static long kgsl_ioctl_device_waittimestamp(struct kgsl_device_private
						*dev_priv, unsigned int cmd,
						void *data)
{
	int result = 0;
	struct kgsl_device_waittimestamp *param = data;

	/* Set the active count so that suspend doesn't do the
	   wrong thing */

	dev_priv->device->active_cnt++;

	result = dev_priv->device->ftbl->waittimestamp(dev_priv->device,
					param->timestamp,
					param->timeout);

	/* Fire off any pending suspend operations that are in flight */

	INIT_COMPLETION(dev_priv->device->suspend_gate);
	dev_priv->device->active_cnt--;
	complete(&dev_priv->device->suspend_gate);

	return result;
}
/*
 * check_ibdesc - validate indirect buffers against the process' memory
 * @parse: when true, also run each IB through the CFF dump parser
 *
 * Returns true if every IB lies fully inside a known memory entry (and,
 * when requested, parses cleanly); false otherwise.
 */
static bool check_ibdesc(struct kgsl_device_private *dev_priv,
			 struct kgsl_ibdesc *ibdesc, unsigned int numibs,
			 bool parse)
{
	bool result = true;
	unsigned int i;
	for (i = 0; i < numibs; i++) {
		struct kgsl_mem_entry *entry;
		spin_lock(&dev_priv->process_priv->mem_lock);
		entry = kgsl_sharedmem_find_region(dev_priv->process_priv,
			ibdesc[i].gpuaddr, ibdesc[i].sizedwords * sizeof(uint));
		spin_unlock(&dev_priv->process_priv->mem_lock);
		if (entry == NULL) {
			KGSL_DRV_ERR(dev_priv->device,
				"invalid cmd buffer gpuaddr %08x " \
				"sizedwords %d\n", ibdesc[i].gpuaddr,
				ibdesc[i].sizedwords);
			result = false;
			break;
		}

		if (parse && !kgsl_cffdump_parse_ibs(dev_priv, &entry->memdesc,
			ibdesc[i].gpuaddr, ibdesc[i].sizedwords, true)) {
			KGSL_DRV_ERR(dev_priv->device,
				"invalid cmd buffer gpuaddr %08x " \
				"sizedwords %d numibs %d/%d\n",
				ibdesc[i].gpuaddr,
				ibdesc[i].sizedwords, i+1, numibs);
			result = false;
			break;
		}
	}
	return result;
}
896
897static long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
898 unsigned int cmd, void *data)
899{
900 int result = 0;
901 struct kgsl_ringbuffer_issueibcmds *param = data;
902 struct kgsl_ibdesc *ibdesc;
903 struct kgsl_context *context;
904
905#ifdef CONFIG_MSM_KGSL_DRM
906 kgsl_gpu_mem_flush(DRM_KGSL_GEM_CACHE_OP_TO_DEV);
907#endif
908
909 context = kgsl_find_context(dev_priv, param->drawctxt_id);
910 if (context == NULL) {
911 result = -EINVAL;
912 KGSL_DRV_ERR(dev_priv->device,
913 "invalid drawctxt drawctxt_id %d\n",
914 param->drawctxt_id);
915 goto done;
916 }
917
918 if (param->flags & KGSL_CONTEXT_SUBMIT_IB_LIST) {
919 KGSL_DRV_INFO(dev_priv->device,
920 "Using IB list mode for ib submission, numibs: %d\n",
921 param->numibs);
922 if (!param->numibs) {
923 KGSL_DRV_ERR(dev_priv->device,
924 "Invalid numibs as parameter: %d\n",
925 param->numibs);
926 result = -EINVAL;
927 goto done;
928 }
929
930 ibdesc = kzalloc(sizeof(struct kgsl_ibdesc) * param->numibs,
931 GFP_KERNEL);
932 if (!ibdesc) {
933 KGSL_MEM_ERR(dev_priv->device,
934 "kzalloc(%d) failed\n",
935 sizeof(struct kgsl_ibdesc) * param->numibs);
936 result = -ENOMEM;
937 goto done;
938 }
939
940 if (copy_from_user(ibdesc, (void *)param->ibdesc_addr,
941 sizeof(struct kgsl_ibdesc) * param->numibs)) {
942 result = -EFAULT;
943 KGSL_DRV_ERR(dev_priv->device,
944 "copy_from_user failed\n");
945 goto free_ibdesc;
946 }
947 } else {
948 KGSL_DRV_INFO(dev_priv->device,
949 "Using single IB submission mode for ib submission\n");
950 /* If user space driver is still using the old mode of
951 * submitting single ib then we need to support that as well */
952 ibdesc = kzalloc(sizeof(struct kgsl_ibdesc), GFP_KERNEL);
953 if (!ibdesc) {
954 KGSL_MEM_ERR(dev_priv->device,
955 "kzalloc(%d) failed\n",
956 sizeof(struct kgsl_ibdesc));
957 result = -ENOMEM;
958 goto done;
959 }
960 ibdesc[0].gpuaddr = param->ibdesc_addr;
961 ibdesc[0].sizedwords = param->numibs;
962 param->numibs = 1;
963 }
964
965 if (!check_ibdesc(dev_priv, ibdesc, param->numibs, true)) {
966 KGSL_DRV_ERR(dev_priv->device, "bad ibdesc");
967 result = -EINVAL;
968 goto free_ibdesc;
969 }
970
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700971 result = dev_priv->device->ftbl->issueibcmds(dev_priv,
972 context,
973 ibdesc,
974 param->numibs,
975 &param->timestamp,
976 param->flags);
977
978 if (result != 0)
979 goto free_ibdesc;
980
981 /* this is a check to try to detect if a command buffer was freed
982 * during issueibcmds().
983 */
984 if (!check_ibdesc(dev_priv, ibdesc, param->numibs, false)) {
985 KGSL_DRV_ERR(dev_priv->device, "bad ibdesc AFTER issue");
986 result = -EINVAL;
987 goto free_ibdesc;
988 }
989
990free_ibdesc:
991 kfree(ibdesc);
992done:
993
994#ifdef CONFIG_MSM_KGSL_DRM
995 kgsl_gpu_mem_flush(DRM_KGSL_GEM_CACHE_OP_FROM_DEV);
996#endif
997
998 return result;
999}
1000
/*
 * IOCTL handler: read back the device timestamp of the requested type.
 * Always succeeds.
 */
static long kgsl_ioctl_cmdstream_readtimestamp(struct kgsl_device_private
						*dev_priv, unsigned int cmd,
						void *data)
{
	struct kgsl_cmdstream_readtimestamp *param = data;

	param->timestamp =
		dev_priv->device->ftbl->readtimestamp(dev_priv->device,
		param->type);

	return 0;
}
1013
/*
 * IOCTL handler: detach an allocation from the process list and queue
 * it to be freed once the given timestamp retires.  Returns -EINVAL if
 * no allocation matches param->gpuaddr.
 */
static long kgsl_ioctl_cmdstream_freememontimestamp(struct kgsl_device_private
						    *dev_priv, unsigned int cmd,
						    void *data)
{
	int result = 0;
	struct kgsl_cmdstream_freememontimestamp *param = data;
	struct kgsl_mem_entry *entry = NULL;

	/* Detach from the process list under the lock, queue outside it */
	spin_lock(&dev_priv->process_priv->mem_lock);
	entry = kgsl_sharedmem_find(dev_priv->process_priv, param->gpuaddr);
	if (entry)
		list_del(&entry->list);
	spin_unlock(&dev_priv->process_priv->mem_lock);

	if (entry) {
		kgsl_memqueue_freememontimestamp(dev_priv->device, entry,
					param->timestamp, param->type);
	} else {
		KGSL_DRV_ERR(dev_priv->device,
			"invalid gpuaddr %08x\n", param->gpuaddr);
		result = -EINVAL;
	}

	return result;
}
1039
/*
 * IOCTL handler: allocate a context id and invoke the device-specific
 * draw-context constructor.  On failure the context is destroyed again.
 * NOTE(review): param->drawctxt_id is written even when the constructor
 * failed; harmless only if the ioctl layer skips copy-out on error --
 * confirm against the dispatcher.
 */
static long kgsl_ioctl_drawctxt_create(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_drawctxt_create *param = data;
	struct kgsl_context *context = NULL;

	context = kgsl_create_context(dev_priv);

	if (context == NULL) {
		result = -ENOMEM;
		goto done;
	}

	if (dev_priv->device->ftbl->drawctxt_create)
		result = dev_priv->device->ftbl->drawctxt_create(
			dev_priv->device, dev_priv->process_priv->pagetable,
			context, param->flags);

	param->drawctxt_id = context->id;

done:
	if (result && context)
		kgsl_destroy_context(dev_priv, context);

	return result;
}
1067
1068static long kgsl_ioctl_drawctxt_destroy(struct kgsl_device_private *dev_priv,
1069 unsigned int cmd, void *data)
1070{
1071 int result = 0;
1072 struct kgsl_drawctxt_destroy *param = data;
1073 struct kgsl_context *context;
1074
1075 context = kgsl_find_context(dev_priv, param->drawctxt_id);
1076
1077 if (context == NULL) {
1078 result = -EINVAL;
1079 goto done;
1080 }
1081
1082 if (dev_priv->device->ftbl->drawctxt_destroy)
1083 dev_priv->device->ftbl->drawctxt_destroy(dev_priv->device,
1084 context);
1085
1086 kgsl_destroy_context(dev_priv, context);
1087
1088done:
1089 return result;
1090}
1091
1092static long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
1093 unsigned int cmd, void *data)
1094{
1095 int result = 0;
1096 struct kgsl_sharedmem_free *param = data;
1097 struct kgsl_process_private *private = dev_priv->process_priv;
1098 struct kgsl_mem_entry *entry = NULL;
1099
1100 spin_lock(&private->mem_lock);
1101 entry = kgsl_sharedmem_find(private, param->gpuaddr);
1102 if (entry)
1103 list_del(&entry->list);
1104 spin_unlock(&private->mem_lock);
1105
1106 if (entry) {
1107 kgsl_mem_entry_put(entry);
1108 } else {
1109 KGSL_CORE_ERR("invalid gpuaddr %08x\n", param->gpuaddr);
1110 result = -EINVAL;
1111 }
1112
1113 return result;
1114}
1115
1116static struct vm_area_struct *kgsl_get_vma_from_start_addr(unsigned int addr)
1117{
1118 struct vm_area_struct *vma;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001119
1120 down_read(&current->mm->mmap_sem);
1121 vma = find_vma(current->mm, addr);
1122 up_read(&current->mm->mmap_sem);
Jordan Crouse2c542b62011-07-26 08:30:20 -06001123 if (!vma)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001124 KGSL_CORE_ERR("find_vma(%x) failed\n", addr);
Jordan Crouse2c542b62011-07-26 08:30:20 -06001125
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001126 return vma;
1127}
1128
/*
 * kgsl_ioctl_sharedmem_from_vmalloc - allocate GPU memory and map it
 * into an existing user VMA.
 *
 * Legacy ioctl: the caller mmaps a region first, then asks the driver
 * to back it with vmalloc'd pages that are also mapped to the GPU.
 * NOTE: param->gpuaddr is overloaded - on input a non-zero value is
 * the requested length; on success it is overwritten with the GPU
 * address of the allocation.
 *
 * Requires the MMU; returns 0 on success or a negative error code.
 */
static long
kgsl_ioctl_sharedmem_from_vmalloc(struct kgsl_device_private *dev_priv,
				  unsigned int cmd, void *data)
{
	int result = 0, len = 0;
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_sharedmem_from_vmalloc *param = data;
	struct kgsl_mem_entry *entry = NULL;
	struct vm_area_struct *vma;

	if (!kgsl_mmu_enabled())
		return -ENODEV;

	if (!param->hostptr) {
		KGSL_CORE_ERR("invalid hostptr %x\n", param->hostptr);
		result = -EINVAL;
		goto error;
	}

	vma = kgsl_get_vma_from_start_addr(param->hostptr);
	if (!vma) {
		result = -EINVAL;
		goto error;
	}

	/*
	 * If the user specified a length, use it, otherwise try to
	 * infer the length if the vma region
	 */
	if (param->gpuaddr != 0) {
		/* see note above: gpuaddr carries the length on input */
		len = param->gpuaddr;
	} else {
		/*
		 * For this to work, we have to assume the VMA region is only
		 * for this single allocation. If it isn't, then bail out
		 */
		if (vma->vm_pgoff || (param->hostptr != vma->vm_start)) {
			KGSL_CORE_ERR("VMA region does not match hostaddr\n");
			result = -EINVAL;
			goto error;
		}

		len = vma->vm_end - vma->vm_start;
	}

	/* Make sure it fits */
	if (len == 0 || param->hostptr + len > vma->vm_end) {
		KGSL_CORE_ERR("Invalid memory allocation length %d\n", len);
		result = -EINVAL;
		goto error;
	}

	entry = kgsl_mem_entry_create();
	if (entry == NULL) {
		result = -ENOMEM;
		goto error;
	}

	/* Allocate GPU-visible, user-mappable backing pages */
	result = kgsl_sharedmem_vmalloc_user(&entry->memdesc,
					     private->pagetable, len,
					     param->flags);
	if (result != 0)
		goto error_free_entry;

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	/* Point the user's VMA at the freshly allocated vmalloc pages */
	result = remap_vmalloc_range(vma, (void *) entry->memdesc.hostptr, 0);
	if (result) {
		KGSL_CORE_ERR("remap_vmalloc_range failed: %d\n", result);
		goto error_free_vmalloc;
	}

	param->gpuaddr = entry->memdesc.gpuaddr;

	entry->memtype = KGSL_MEM_ENTRY_KERNEL;

	kgsl_mem_entry_attach_process(entry, private);

	/* Process specific statistics */
	kgsl_process_add_stats(private, entry->memtype, len);

	kgsl_check_idle(dev_priv->device);
	return 0;

error_free_vmalloc:
	kgsl_sharedmem_free(&entry->memdesc);

error_free_entry:
	kfree(entry);

error:
	kgsl_check_idle(dev_priv->device);
	return result;
}
1223
/*
 * Return nonzero when the window [start, start + size) extends past a
 * region of length len.  The sum is computed in 64 bits so that
 * start + size cannot wrap on 32-bit systems.
 */
static inline int _check_region(unsigned long start, unsigned long size,
		    uint64_t len)
{
	return (((uint64_t) start + size) > len) ? 1 : 0;
}
1230
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001231static int kgsl_get_phys_file(int fd, unsigned long *start, unsigned long *len,
1232 unsigned long *vstart, struct file **filep)
1233{
1234 struct file *fbfile;
1235 int ret = 0;
1236 dev_t rdev;
1237 struct fb_info *info;
1238
1239 *filep = NULL;
Jordan Crousefd978432011-09-02 14:34:32 -06001240#ifdef CONFIG_ANDROID_PMEM
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001241 if (!get_pmem_file(fd, start, vstart, len, filep))
1242 return 0;
Jordan Crousefd978432011-09-02 14:34:32 -06001243#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001244
1245 fbfile = fget(fd);
1246 if (fbfile == NULL) {
1247 KGSL_CORE_ERR("fget_light failed\n");
1248 return -1;
1249 }
1250
1251 rdev = fbfile->f_dentry->d_inode->i_rdev;
1252 info = MAJOR(rdev) == FB_MAJOR ? registered_fb[MINOR(rdev)] : NULL;
1253 if (info) {
1254 *start = info->fix.smem_start;
1255 *len = info->fix.smem_len;
1256 *vstart = (unsigned long)__va(info->fix.smem_start);
1257 ret = 0;
1258 } else {
1259 KGSL_CORE_ERR("framebuffer minor %d not found\n",
1260 MINOR(rdev));
1261 ret = -1;
1262 }
1263
1264 fput(fbfile);
1265
1266 return ret;
1267}
1268
1269static int kgsl_setup_phys_file(struct kgsl_mem_entry *entry,
1270 struct kgsl_pagetable *pagetable,
1271 unsigned int fd, unsigned int offset,
1272 size_t size)
1273{
1274 int ret;
1275 unsigned long phys, virt, len;
1276 struct file *filep;
1277
1278 ret = kgsl_get_phys_file(fd, &phys, &len, &virt, &filep);
1279 if (ret)
1280 return ret;
1281
Wei Zou4061c0b2011-07-08 10:24:22 -07001282 if (phys == 0) {
1283 ret = -EINVAL;
1284 goto err;
1285 }
1286
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001287 if (offset >= len) {
1288 ret = -EINVAL;
1289 goto err;
1290 }
1291
1292 if (size == 0)
1293 size = len;
1294
1295 /* Adjust the size of the region to account for the offset */
1296 size += offset & ~PAGE_MASK;
1297
1298 size = ALIGN(size, PAGE_SIZE);
1299
1300 if (_check_region(offset & PAGE_MASK, size, len)) {
1301 KGSL_CORE_ERR("Offset (%ld) + size (%d) is larger"
1302 "than pmem region length %ld\n",
1303 offset & PAGE_MASK, size, len);
1304 ret = -EINVAL;
1305 goto err;
1306
1307 }
1308
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001309 entry->priv_data = filep;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001310
1311 entry->memdesc.pagetable = pagetable;
1312 entry->memdesc.size = size;
1313 entry->memdesc.physaddr = phys + (offset & PAGE_MASK);
1314 entry->memdesc.hostptr = (void *) (virt + (offset & PAGE_MASK));
Jordan Croused17e9aa2011-10-12 16:57:48 -06001315
1316 ret = memdesc_sg_phys(&entry->memdesc,
1317 phys + (offset & PAGE_MASK), size);
1318 if (ret)
1319 goto err;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001320
1321 return 0;
1322err:
Jordan Crousefd978432011-09-02 14:34:32 -06001323#ifdef CONFIG_ANDROID_PMEM
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001324 put_pmem_file(filep);
Jordan Crousefd978432011-09-02 14:34:32 -06001325#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001326 return ret;
1327}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001328
Jordan Croused17e9aa2011-10-12 16:57:48 -06001329static int memdesc_sg_virt(struct kgsl_memdesc *memdesc,
1330 void *addr, int size)
1331{
1332 int i;
1333 int sglen = PAGE_ALIGN(size) / PAGE_SIZE;
1334 unsigned long paddr = (unsigned long) addr;
1335
1336 memdesc->sg = kmalloc(sglen * sizeof(struct scatterlist),
1337 GFP_KERNEL);
1338 if (memdesc->sg == NULL)
1339 return -ENOMEM;
1340
1341 memdesc->sglen = sglen;
1342 sg_init_table(memdesc->sg, sglen);
1343
1344 spin_lock(&current->mm->page_table_lock);
1345
1346 for (i = 0; i < sglen; i++, paddr += PAGE_SIZE) {
1347 struct page *page;
1348 pmd_t *ppmd;
1349 pte_t *ppte;
1350 pgd_t *ppgd = pgd_offset(current->mm, paddr);
1351
1352 if (pgd_none(*ppgd) || pgd_bad(*ppgd))
1353 goto err;
1354
1355 ppmd = pmd_offset(ppgd, paddr);
1356 if (pmd_none(*ppmd) || pmd_bad(*ppmd))
1357 goto err;
1358
1359 ppte = pte_offset_map(ppmd, paddr);
1360 if (ppte == NULL)
1361 goto err;
1362
1363 page = pfn_to_page(pte_pfn(*ppte));
1364 if (!page)
1365 goto err;
1366
1367 sg_set_page(&memdesc->sg[i], page, PAGE_SIZE, 0);
1368 pte_unmap(ppte);
1369 }
1370
1371 spin_unlock(&current->mm->page_table_lock);
1372
1373 return 0;
1374
1375err:
1376 spin_unlock(&current->mm->page_table_lock);
1377 kfree(memdesc->sg);
1378 memdesc->sg = NULL;
1379
1380 return -EINVAL;
1381}
1382
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001383static int kgsl_setup_hostptr(struct kgsl_mem_entry *entry,
1384 struct kgsl_pagetable *pagetable,
1385 void *hostptr, unsigned int offset,
1386 size_t size)
1387{
1388 struct vm_area_struct *vma;
1389 unsigned int len;
1390
1391 down_read(&current->mm->mmap_sem);
1392 vma = find_vma(current->mm, (unsigned int) hostptr);
1393 up_read(&current->mm->mmap_sem);
1394
1395 if (!vma) {
1396 KGSL_CORE_ERR("find_vma(%p) failed\n", hostptr);
1397 return -EINVAL;
1398 }
1399
1400 /* We don't necessarily start at vma->vm_start */
1401 len = vma->vm_end - (unsigned long) hostptr;
1402
1403 if (offset >= len)
1404 return -EINVAL;
1405
1406 if (!KGSL_IS_PAGE_ALIGNED((unsigned long) hostptr) ||
1407 !KGSL_IS_PAGE_ALIGNED(len)) {
1408 KGSL_CORE_ERR("user address len(%u)"
1409 "and start(%p) must be page"
1410 "aligned\n", len, hostptr);
1411 return -EINVAL;
1412 }
1413
1414 if (size == 0)
1415 size = len;
1416
1417 /* Adjust the size of the region to account for the offset */
1418 size += offset & ~PAGE_MASK;
1419
1420 size = ALIGN(size, PAGE_SIZE);
1421
1422 if (_check_region(offset & PAGE_MASK, size, len)) {
1423 KGSL_CORE_ERR("Offset (%ld) + size (%d) is larger"
1424 "than region length %d\n",
1425 offset & PAGE_MASK, size, len);
1426 return -EINVAL;
1427 }
1428
1429 entry->memdesc.pagetable = pagetable;
1430 entry->memdesc.size = size;
1431 entry->memdesc.hostptr = hostptr + (offset & PAGE_MASK);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001432
Jordan Croused17e9aa2011-10-12 16:57:48 -06001433 return memdesc_sg_virt(&entry->memdesc,
1434 hostptr + (offset & PAGE_MASK), size);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001435}
1436
#ifdef CONFIG_ASHMEM
/*
 * kgsl_setup_ashmem - back a mem entry with an ashmem buffer.
 * The user must have mmap'd the ashmem fd at @hostptr; the VMA must
 * cover exactly the allocation (pgoff 0, start == hostptr) and @size
 * must be zero (take the whole VMA) or equal to the VMA length.
 * On success entry->priv_data holds the pinned ashmem file, released
 * later with put_ashmem_file().  Returns 0 or a negative error.
 */
static int kgsl_setup_ashmem(struct kgsl_mem_entry *entry,
			     struct kgsl_pagetable *pagetable,
			     int fd, void *hostptr, size_t size)
{
	int ret;
	struct vm_area_struct *vma;
	struct file *filep, *vmfile;
	unsigned long len;
	unsigned int hostaddr = (unsigned int) hostptr;

	vma = kgsl_get_vma_from_start_addr(hostaddr);
	if (vma == NULL)
		return -EINVAL;

	/* The VMA must map the ashmem region from its very beginning */
	if (vma->vm_pgoff || vma->vm_start != hostaddr) {
		KGSL_CORE_ERR("Invalid vma region\n");
		return -EINVAL;
	}

	len = vma->vm_end - vma->vm_start;

	if (size == 0)
		size = len;

	if (size != len) {
		KGSL_CORE_ERR("Invalid size %d for vma region %p\n",
			      size, hostptr);
		return -EINVAL;
	}

	/* Pins the ashmem file; must be balanced with put_ashmem_file */
	ret = get_ashmem_file(fd, &filep, &vmfile, &len);

	if (ret) {
		KGSL_CORE_ERR("get_ashmem_file failed\n");
		return ret;
	}

	/* Make sure the fd and the mapped VMA refer to the same buffer */
	if (vmfile != vma->vm_file) {
		KGSL_CORE_ERR("ashmem shmem file does not match vma\n");
		ret = -EINVAL;
		goto err;
	}

	entry->priv_data = filep;
	entry->memdesc.pagetable = pagetable;
	entry->memdesc.size = ALIGN(size, PAGE_SIZE);
	entry->memdesc.hostptr = hostptr;

	/* Walk the page tables to build the scatterlist; requires the
	 * ashmem pages to be faulted in already */
	ret = memdesc_sg_virt(&entry->memdesc, hostptr, size);
	if (ret)
		goto err;

	return 0;

err:
	put_ashmem_file(filep);
	return ret;
}
#else
/* Stub used when ashmem support is not compiled in */
static int kgsl_setup_ashmem(struct kgsl_mem_entry *entry,
			     struct kgsl_pagetable *pagetable,
			     int fd, void *hostptr, size_t size)
{
	return -EINVAL;
}
#endif
1504
/*
 * kgsl_setup_ion - back a mem entry with an ION buffer referenced by
 * @fd.  Lazily creates the driver-wide ION client on first use.  The
 * imported handle is stored in entry->priv_data and must be released
 * with ion_free() when the entry dies.  The memdesc size is derived by
 * summing the lengths of the ION-provided scatterlist.
 * Returns 0 or a negative error code.
 */
static int kgsl_setup_ion(struct kgsl_mem_entry *entry,
		struct kgsl_pagetable *pagetable, int fd)
{
	struct ion_handle *handle;
	struct scatterlist *s;
	unsigned long flags;

	if (kgsl_ion_client == NULL) {
		/* NOTE(review): UINT_MAX presumably means "all heaps" -
		 * confirm against the msm_ion client API */
		kgsl_ion_client = msm_ion_client_create(UINT_MAX, KGSL_NAME);
		if (kgsl_ion_client == NULL)
			return -ENODEV;
	}

	handle = ion_import_fd(kgsl_ion_client, fd);
	if (IS_ERR_OR_NULL(handle))
		return PTR_ERR(handle);

	entry->memtype = KGSL_MEM_ENTRY_ION;
	entry->priv_data = handle;
	entry->memdesc.pagetable = pagetable;
	entry->memdesc.size = 0;

	if (ion_handle_get_flags(kgsl_ion_client, handle, &flags))
		goto err;

	entry->memdesc.sg = ion_map_dma(kgsl_ion_client, handle, flags);

	if (IS_ERR_OR_NULL(entry->memdesc.sg))
		goto err;

	/* Calculate the size of the memdesc from the sglist */

	entry->memdesc.sglen = 0;

	for (s = entry->memdesc.sg; s != NULL; s = sg_next(s)) {
		entry->memdesc.size += s->length;
		entry->memdesc.sglen++;
	}

	return 0;
err:
	/* any failure after the import drops the handle reference */
	ion_free(kgsl_ion_client, handle);
	return -ENOMEM;
}
1549
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001550static long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
1551 unsigned int cmd, void *data)
1552{
1553 int result = -EINVAL;
1554 struct kgsl_map_user_mem *param = data;
1555 struct kgsl_mem_entry *entry = NULL;
1556 struct kgsl_process_private *private = dev_priv->process_priv;
Jason848741a2011-07-12 10:24:25 -07001557 enum kgsl_user_mem_type memtype;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001558
1559 entry = kgsl_mem_entry_create();
1560
1561 if (entry == NULL)
1562 return -ENOMEM;
1563
Jason848741a2011-07-12 10:24:25 -07001564 if (_IOC_SIZE(cmd) == sizeof(struct kgsl_sharedmem_from_pmem))
1565 memtype = KGSL_USER_MEM_TYPE_PMEM;
1566 else
1567 memtype = param->memtype;
1568
1569 switch (memtype) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001570 case KGSL_USER_MEM_TYPE_PMEM:
1571 if (param->fd == 0 || param->len == 0)
1572 break;
1573
1574 result = kgsl_setup_phys_file(entry, private->pagetable,
1575 param->fd, param->offset,
1576 param->len);
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001577 entry->memtype = KGSL_MEM_ENTRY_PMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001578 break;
1579
1580 case KGSL_USER_MEM_TYPE_ADDR:
1581 if (!kgsl_mmu_enabled()) {
1582 KGSL_DRV_ERR(dev_priv->device,
1583 "Cannot map paged memory with the "
1584 "MMU disabled\n");
1585 break;
1586 }
1587
1588 if (param->hostptr == 0)
1589 break;
1590
1591 result = kgsl_setup_hostptr(entry, private->pagetable,
1592 (void *) param->hostptr,
1593 param->offset, param->len);
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001594 entry->memtype = KGSL_MEM_ENTRY_USER;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001595 break;
1596
1597 case KGSL_USER_MEM_TYPE_ASHMEM:
1598 if (!kgsl_mmu_enabled()) {
1599 KGSL_DRV_ERR(dev_priv->device,
1600 "Cannot map paged memory with the "
1601 "MMU disabled\n");
1602 break;
1603 }
1604
1605 if (param->hostptr == 0)
1606 break;
1607
1608 result = kgsl_setup_ashmem(entry, private->pagetable,
1609 param->fd, (void *) param->hostptr,
1610 param->len);
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001611
1612 entry->memtype = KGSL_MEM_ENTRY_ASHMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001613 break;
Jordan Crouse8eab35a2011-10-12 16:57:48 -06001614 case KGSL_USER_MEM_TYPE_ION:
1615 result = kgsl_setup_ion(entry, private->pagetable,
1616 param->fd);
1617 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001618 default:
Jason848741a2011-07-12 10:24:25 -07001619 KGSL_CORE_ERR("Invalid memory type: %x\n", memtype);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001620 break;
1621 }
1622
1623 if (result)
1624 goto error;
1625
1626 result = kgsl_mmu_map(private->pagetable,
1627 &entry->memdesc,
1628 GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
1629
1630 if (result)
1631 goto error_put_file_ptr;
1632
1633 /* Adjust the returned value for a non 4k aligned offset */
1634 param->gpuaddr = entry->memdesc.gpuaddr + (param->offset & ~PAGE_MASK);
1635
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001636 KGSL_STATS_ADD(param->len, kgsl_driver.stats.mapped,
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001637 kgsl_driver.stats.mapped_max);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001638
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001639 kgsl_process_add_stats(private, entry->memtype, param->len);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001640
1641 kgsl_mem_entry_attach_process(entry, private);
1642
1643 kgsl_check_idle(dev_priv->device);
1644 return result;
1645
1646 error_put_file_ptr:
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001647 if (entry->priv_data)
1648 fput(entry->priv_data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001649
1650error:
1651 kfree(entry);
1652 kgsl_check_idle(dev_priv->device);
1653 return result;
1654}
1655
1656/*This function flushes a graphics memory allocation from CPU cache
1657 *when caching is enabled with MMU*/
1658static long
1659kgsl_ioctl_sharedmem_flush_cache(struct kgsl_device_private *dev_priv,
1660 unsigned int cmd, void *data)
1661{
1662 int result = 0;
1663 struct kgsl_mem_entry *entry;
1664 struct kgsl_sharedmem_free *param = data;
1665 struct kgsl_process_private *private = dev_priv->process_priv;
1666
1667 spin_lock(&private->mem_lock);
1668 entry = kgsl_sharedmem_find(private, param->gpuaddr);
1669 if (!entry) {
1670 KGSL_CORE_ERR("invalid gpuaddr %08x\n", param->gpuaddr);
1671 result = -EINVAL;
Jeremy Gebben690f9d12011-08-08 16:33:49 -06001672 goto done;
1673 }
1674 if (!entry->memdesc.hostptr)
1675 entry->memdesc.hostptr =
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001676 kgsl_gpuaddr_to_vaddr(&entry->memdesc,
1677 param->gpuaddr, &entry->memdesc.size);
1678
Jeremy Gebben690f9d12011-08-08 16:33:49 -06001679 if (!entry->memdesc.hostptr) {
1680 KGSL_CORE_ERR("invalid hostptr with gpuaddr %08x\n",
1681 param->gpuaddr);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001682 goto done;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001683 }
Jeremy Gebben690f9d12011-08-08 16:33:49 -06001684
1685 kgsl_cache_range_op(&entry->memdesc, KGSL_CACHE_OP_CLEAN);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001686done:
Jeremy Gebben690f9d12011-08-08 16:33:49 -06001687 spin_unlock(&private->mem_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001688 return result;
1689}
1690
1691static long
1692kgsl_ioctl_gpumem_alloc(struct kgsl_device_private *dev_priv,
1693 unsigned int cmd, void *data)
1694{
1695 struct kgsl_process_private *private = dev_priv->process_priv;
1696 struct kgsl_gpumem_alloc *param = data;
1697 struct kgsl_mem_entry *entry;
1698 int result;
1699
1700 entry = kgsl_mem_entry_create();
1701 if (entry == NULL)
1702 return -ENOMEM;
1703
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001704 result = kgsl_allocate_user(&entry->memdesc, private->pagetable,
1705 param->size, param->flags);
1706
1707 if (result == 0) {
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001708 entry->memtype = KGSL_MEM_ENTRY_KERNEL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001709 kgsl_mem_entry_attach_process(entry, private);
1710 param->gpuaddr = entry->memdesc.gpuaddr;
1711
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001712 kgsl_process_add_stats(private, entry->memtype, param->size);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001713 } else
1714 kfree(entry);
1715
1716 kgsl_check_idle(dev_priv->device);
1717 return result;
1718}
Jeremy Gebbena7423e42011-04-18 15:11:21 -06001719static long kgsl_ioctl_cff_syncmem(struct kgsl_device_private *dev_priv,
1720 unsigned int cmd, void *data)
1721{
1722 int result = 0;
1723 struct kgsl_cff_syncmem *param = data;
1724 struct kgsl_process_private *private = dev_priv->process_priv;
1725 struct kgsl_mem_entry *entry = NULL;
1726
1727 spin_lock(&private->mem_lock);
1728 entry = kgsl_sharedmem_find_region(private, param->gpuaddr, param->len);
1729 if (entry)
1730 kgsl_cffdump_syncmem(dev_priv, &entry->memdesc, param->gpuaddr,
1731 param->len, true);
1732 else
1733 result = -EINVAL;
1734 spin_unlock(&private->mem_lock);
1735 return result;
1736}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001737
Sushmita Susheelendra41f8fa32011-05-11 17:15:58 -06001738static long kgsl_ioctl_cff_user_event(struct kgsl_device_private *dev_priv,
1739 unsigned int cmd, void *data)
1740{
1741 int result = 0;
1742 struct kgsl_cff_user_event *param = data;
1743
1744 kgsl_cffdump_user_event(param->cff_opcode, param->op1, param->op2,
1745 param->op3, param->op4, param->op5);
1746
1747 return result;
1748}
1749
Jordan Croused4bc9d22011-11-17 13:39:21 -07001750#ifdef CONFIG_GENLOCK
1751struct kgsl_genlock_event_priv {
1752 struct genlock_handle *handle;
1753 struct genlock *lock;
1754};
1755
1756/**
1757 * kgsl_genlock_event_cb - Event callback for a genlock timestamp event
1758 * @device - The KGSL device that expired the timestamp
1759 * @priv - private data for the event
1760 * @timestamp - the timestamp that triggered the event
1761 *
1762 * Release a genlock lock following the expiration of a timestamp
1763 */
1764
1765static void kgsl_genlock_event_cb(struct kgsl_device *device,
1766 void *priv, u32 timestamp)
1767{
1768 struct kgsl_genlock_event_priv *ev = priv;
1769 int ret;
1770
1771 ret = genlock_lock(ev->handle, GENLOCK_UNLOCK, 0, 0);
1772 if (ret)
1773 KGSL_CORE_ERR("Error while unlocking genlock: %d\n", ret);
1774
1775 genlock_put_handle(ev->handle);
1776
1777 kfree(ev);
1778}
1779
1780/**
1781 * kgsl_add_genlock-event - Create a new genlock event
1782 * @device - KGSL device to create the event on
1783 * @timestamp - Timestamp to trigger the event
1784 * @data - User space buffer containing struct kgsl_genlock_event_priv
1785 * @len - length of the userspace buffer
1786 * @returns 0 on success or error code on error
1787 *
1788 * Attack to a genlock handle and register an event to release the
1789 * genlock lock when the timestamp expires
1790 */
1791
1792static int kgsl_add_genlock_event(struct kgsl_device *device,
1793 u32 timestamp, void __user *data, int len)
1794{
1795 struct kgsl_genlock_event_priv *event;
1796 struct kgsl_timestamp_event_genlock priv;
1797 int ret;
1798
1799 if (len != sizeof(priv))
1800 return -EINVAL;
1801
1802 if (copy_from_user(&priv, data, sizeof(priv)))
1803 return -EFAULT;
1804
1805 event = kzalloc(sizeof(*event), GFP_KERNEL);
1806
1807 if (event == NULL)
1808 return -ENOMEM;
1809
1810 event->handle = genlock_get_handle_fd(priv.handle);
1811
1812 if (IS_ERR(event->handle)) {
1813 int ret = PTR_ERR(event->handle);
1814 kfree(event);
1815 return ret;
1816 }
1817
1818 ret = kgsl_add_event(device, timestamp, kgsl_genlock_event_cb, event);
1819 if (ret)
1820 kfree(event);
1821
1822 return ret;
1823}
1824#else
1825static long kgsl_add_genlock_event(struct kgsl_device *device,
1826 u32 timestamp, void __user *data, int len)
1827{
1828 return -EINVAL;
1829}
1830#endif
1831
1832/**
1833 * kgsl_ioctl_timestamp_event - Register a new timestamp event from userspace
1834 * @dev_priv - pointer to the private device structure
1835 * @cmd - the ioctl cmd passed from kgsl_ioctl
1836 * @data - the user data buffer from kgsl_ioctl
1837 * @returns 0 on success or error code on failure
1838 */
1839
1840static long kgsl_ioctl_timestamp_event(struct kgsl_device_private *dev_priv,
1841 unsigned int cmd, void *data)
1842{
1843 struct kgsl_timestamp_event *param = data;
1844 int ret;
1845
1846 switch (param->type) {
1847 case KGSL_TIMESTAMP_EVENT_GENLOCK:
1848 ret = kgsl_add_genlock_event(dev_priv->device,
1849 param->timestamp, param->priv, param->len);
1850 break;
1851 default:
1852 ret = -EINVAL;
1853 }
1854
1855 return ret;
1856}
1857
/* Signature shared by all KGSL ioctl handlers */
typedef long (*kgsl_ioctl_func_t)(struct kgsl_device_private *,
	unsigned int, void *);

/* Build a table slot indexed by the ioctl number; .lock selects whether
 * kgsl_ioctl takes the device mutex around the handler */
#define KGSL_IOCTL_FUNC(_cmd, _func, _lock) \
	[_IOC_NR(_cmd)] = { .cmd = _cmd, .func = _func, .lock = _lock }

/* Dispatch table for the core KGSL ioctls; device specific ioctls fall
 * through to the ftbl->ioctl hook in kgsl_ioctl */
static const struct {
	unsigned int cmd;
	kgsl_ioctl_func_t func;
	int lock;
} kgsl_ioctl_funcs[] = {
	KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_GETPROPERTY,
			kgsl_ioctl_device_getproperty, 1),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP,
			kgsl_ioctl_device_waittimestamp, 1),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS,
			kgsl_ioctl_rb_issueibcmds, 1),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP,
			kgsl_ioctl_cmdstream_readtimestamp, 1),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP,
			kgsl_ioctl_cmdstream_freememontimestamp, 1),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_CREATE,
			kgsl_ioctl_drawctxt_create, 1),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_DESTROY,
			kgsl_ioctl_drawctxt_destroy, 1),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_MAP_USER_MEM,
			kgsl_ioctl_map_user_mem, 0),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FROM_PMEM,
			kgsl_ioctl_map_user_mem, 0),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FREE,
			kgsl_ioctl_sharedmem_free, 0),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC,
			kgsl_ioctl_sharedmem_from_vmalloc, 0),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE,
			kgsl_ioctl_sharedmem_flush_cache, 0),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC,
			kgsl_ioctl_gpumem_alloc, 0),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_SYNCMEM,
			kgsl_ioctl_cff_syncmem, 0),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_USER_EVENT,
			kgsl_ioctl_cff_user_event, 0),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMESTAMP_EVENT,
			kgsl_ioctl_timestamp_event, 0),
};
1902
/*
 * kgsl_ioctl - top level ioctl dispatcher for /dev/kgsl-* nodes.
 * Copies the user argument into a kernel buffer (a 64 byte stack
 * buffer for small commands, kzalloc otherwise), dispatches through
 * kgsl_ioctl_funcs[] or the device ftbl hook, and copies the result
 * back for IOC_OUT commands on success.
 */
static long kgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kgsl_device_private *dev_priv = filep->private_data;
	unsigned int nr = _IOC_NR(cmd);
	kgsl_ioctl_func_t func;
	int lock, ret;
	char ustack[64];
	void *uptr = NULL;

	BUG_ON(dev_priv == NULL);

	/* Workaround for an previously incorrectly defined ioctl code.
	   This helps ensure binary compatability */

	if (cmd == IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD)
		cmd = IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP;
	else if (cmd == IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD)
		cmd = IOCTL_KGSL_CMDSTREAM_READTIMESTAMP;

	if (cmd & (IOC_IN | IOC_OUT)) {
		/* small argument blocks live on the stack; the kfree at
		 * done: mirrors this size test exactly */
		if (_IOC_SIZE(cmd) < sizeof(ustack))
			uptr = ustack;
		else {
			uptr = kzalloc(_IOC_SIZE(cmd), GFP_KERNEL);
			if (uptr == NULL) {
				KGSL_MEM_ERR(dev_priv->device,
					"kzalloc(%d) failed\n", _IOC_SIZE(cmd));
				ret = -ENOMEM;
				goto done;
			}
		}

		if (cmd & IOC_IN) {
			if (copy_from_user(uptr, (void __user *) arg,
				_IOC_SIZE(cmd))) {
				ret = -EFAULT;
				goto done;
			}
		} else
			memset(uptr, 0, _IOC_SIZE(cmd));
	}

	/* Core ioctls dispatch through the table; anything else goes to
	 * the device specific handler (which always takes the lock) */
	if (nr < ARRAY_SIZE(kgsl_ioctl_funcs) &&
		kgsl_ioctl_funcs[nr].func != NULL) {
		func = kgsl_ioctl_funcs[nr].func;
		lock = kgsl_ioctl_funcs[nr].lock;
	} else {
		func = dev_priv->device->ftbl->ioctl;
		if (!func) {
			KGSL_DRV_INFO(dev_priv->device,
				"invalid ioctl code %08x\n", cmd);
			ret = -EINVAL;
			goto done;
		}
		lock = 1;
	}

	if (lock) {
		mutex_lock(&dev_priv->device->mutex);
		kgsl_check_suspended(dev_priv->device);
	}

	ret = func(dev_priv, cmd, uptr);

	if (lock) {
		kgsl_check_idle_locked(dev_priv->device);
		mutex_unlock(&dev_priv->device->mutex);
	}

	if (ret == 0 && (cmd & IOC_OUT)) {
		if (copy_to_user((void __user *) arg, uptr, _IOC_SIZE(cmd)))
			ret = -EFAULT;
	}

done:
	/* kfree(NULL) is a no-op, so reaching here before allocation
	 * (or with the stack buffer in use) is safe */
	if (_IOC_SIZE(cmd) >= sizeof(ustack))
		kfree(uptr);

	return ret;
}
1983
1984static int
1985kgsl_mmap_memstore(struct kgsl_device *device, struct vm_area_struct *vma)
1986{
1987 struct kgsl_memdesc *memdesc = &device->memstore;
1988 int result;
1989 unsigned int vma_size = vma->vm_end - vma->vm_start;
1990
1991 /* The memstore can only be mapped as read only */
1992
1993 if (vma->vm_flags & VM_WRITE)
1994 return -EPERM;
1995
1996 if (memdesc->size != vma_size) {
1997 KGSL_MEM_ERR(device, "memstore bad size: %d should be %d\n",
1998 vma_size, memdesc->size);
1999 return -EINVAL;
2000 }
2001
2002 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2003
2004 result = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
2005 vma_size, vma->vm_page_prot);
2006 if (result != 0)
2007 KGSL_MEM_ERR(device, "remap_pfn_range failed: %d\n",
2008 result);
2009
2010 return result;
2011}
2012
Jordan Crouse4283e172011-09-26 14:45:47 -06002013/*
2014 * kgsl_gpumem_vm_open is called whenever a vma region is copied or split.
2015 * Increase the refcount to make sure that the accounting stays correct
2016 */
2017
2018static void kgsl_gpumem_vm_open(struct vm_area_struct *vma)
2019{
2020 struct kgsl_mem_entry *entry = vma->vm_private_data;
2021 kgsl_mem_entry_get(entry);
2022}
2023
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002024static int
2025kgsl_gpumem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2026{
2027 struct kgsl_mem_entry *entry = vma->vm_private_data;
2028
Jordan Croused17e9aa2011-10-12 16:57:48 -06002029 if (!entry->memdesc.ops || !entry->memdesc.ops->vmfault)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002030 return VM_FAULT_SIGBUS;
2031
2032 return entry->memdesc.ops->vmfault(&entry->memdesc, vma, vmf);
2033}
2034
2035static void
2036kgsl_gpumem_vm_close(struct vm_area_struct *vma)
2037{
2038 struct kgsl_mem_entry *entry = vma->vm_private_data;
2039 kgsl_mem_entry_put(entry);
2040}
2041
2042static struct vm_operations_struct kgsl_gpumem_vm_ops = {
Jordan Crouse4283e172011-09-26 14:45:47 -06002043 .open = kgsl_gpumem_vm_open,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002044 .fault = kgsl_gpumem_vm_fault,
2045 .close = kgsl_gpumem_vm_close,
2046};
2047
2048static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
2049{
2050 unsigned long vma_offset = vma->vm_pgoff << PAGE_SHIFT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002051 struct kgsl_device_private *dev_priv = file->private_data;
2052 struct kgsl_process_private *private = dev_priv->process_priv;
Jordan Crouse976cf0e2011-09-12 10:41:49 -06002053 struct kgsl_mem_entry *tmp, *entry = NULL;
Jordan Crouse2db0af92011-08-08 16:05:09 -06002054 struct kgsl_device *device = dev_priv->device;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002055
2056 /* Handle leagacy behavior for memstore */
2057
2058 if (vma_offset == device->memstore.physaddr)
2059 return kgsl_mmap_memstore(device, vma);
2060
2061 /* Find a chunk of GPU memory */
2062
2063 spin_lock(&private->mem_lock);
Jordan Crouse976cf0e2011-09-12 10:41:49 -06002064 list_for_each_entry(tmp, &private->mem_list, list) {
2065 if (vma_offset == tmp->memdesc.gpuaddr) {
2066 kgsl_mem_entry_get(tmp);
2067 entry = tmp;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002068 break;
2069 }
2070 }
2071 spin_unlock(&private->mem_lock);
2072
2073 if (entry == NULL)
2074 return -EINVAL;
2075
Jordan Croused17e9aa2011-10-12 16:57:48 -06002076 if (!entry->memdesc.ops ||
2077 !entry->memdesc.ops->vmflags ||
2078 !entry->memdesc.ops->vmfault)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002079 return -EINVAL;
2080
2081 vma->vm_flags |= entry->memdesc.ops->vmflags(&entry->memdesc);
2082
2083 vma->vm_private_data = entry;
2084 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
2085 vma->vm_ops = &kgsl_gpumem_vm_ops;
2086 vma->vm_file = file;
2087
2088 return 0;
2089}
2090
/* File operations for the /dev/kgsl-* character device nodes */
static const struct file_operations kgsl_fops = {
	.owner = THIS_MODULE,
	.release = kgsl_release,
	.open = kgsl_open,
	.mmap = kgsl_mmap,
	.unlocked_ioctl = kgsl_ioctl,
};
2098
/*
 * Global driver state shared by all KGSL devices. The mutexes and
 * spinlock are statically initialized so they are usable before
 * kgsl_core_init() runs.
 */
struct kgsl_driver kgsl_driver  = {
	.process_mutex = __MUTEX_INITIALIZER(kgsl_driver.process_mutex),
	.ptlock = __SPIN_LOCK_UNLOCKED(kgsl_driver.ptlock),
	.devlock = __MUTEX_INITIALIZER(kgsl_driver.devlock),
};
EXPORT_SYMBOL(kgsl_driver);
2105
/*
 * kgsl_unregister_device - tear down a device previously set up by
 * kgsl_register_device(): sysfs/cffdump state, wakelock, context idr,
 * memstore, MMU, workqueue and the device node, then free its minor.
 * Teardown runs in roughly the reverse order of registration.
 */
void kgsl_unregister_device(struct kgsl_device *device)
{
	int minor;

	/* Find the minor this device occupies in the device table */
	mutex_lock(&kgsl_driver.devlock);
	for (minor = 0; minor < KGSL_DEVICE_MAX; minor++) {
		if (device == kgsl_driver.devp[minor])
			break;
	}

	mutex_unlock(&kgsl_driver.devlock);

	/* Device was never registered - nothing to tear down */
	if (minor == KGSL_DEVICE_MAX)
		return;

	kgsl_cffdump_close(device->id);
	kgsl_pwrctrl_uninit_sysfs(device);

	/* The idle wakelock is only created on 8x60 targets; mirror
	 * the condition used in kgsl_register_device() */
	if (cpu_is_msm8x60())
		wake_lock_destroy(&device->idle_wakelock);

	idr_destroy(&device->context_idr);

	if (device->memstore.hostptr)
		kgsl_sharedmem_free(&device->memstore);

	kgsl_mmu_close(device);

	if (device->work_queue) {
		destroy_workqueue(device->work_queue);
		device->work_queue = NULL;
	}

	device_destroy(kgsl_driver.class,
		       MKDEV(MAJOR(kgsl_driver.major), minor));

	/* Release the minor for reuse by a later registration */
	mutex_lock(&kgsl_driver.devlock);
	kgsl_driver.devp[minor] = NULL;
	mutex_unlock(&kgsl_driver.devlock);
}
EXPORT_SYMBOL(kgsl_unregister_device);
2147
2148int
2149kgsl_register_device(struct kgsl_device *device)
2150{
2151 int minor, ret;
2152 dev_t dev;
2153
2154 /* Find a minor for the device */
2155
2156 mutex_lock(&kgsl_driver.devlock);
2157 for (minor = 0; minor < KGSL_DEVICE_MAX; minor++) {
2158 if (kgsl_driver.devp[minor] == NULL) {
2159 kgsl_driver.devp[minor] = device;
2160 break;
2161 }
2162 }
2163
2164 mutex_unlock(&kgsl_driver.devlock);
2165
2166 if (minor == KGSL_DEVICE_MAX) {
2167 KGSL_CORE_ERR("minor devices exhausted\n");
2168 return -ENODEV;
2169 }
2170
2171 /* Create the device */
2172 dev = MKDEV(MAJOR(kgsl_driver.major), minor);
2173 device->dev = device_create(kgsl_driver.class,
2174 device->parentdev,
2175 dev, device,
2176 device->name);
2177
2178 if (IS_ERR(device->dev)) {
2179 ret = PTR_ERR(device->dev);
2180 KGSL_CORE_ERR("device_create(%s): %d\n", device->name, ret);
2181 goto err_devlist;
2182 }
2183
2184 dev_set_drvdata(device->parentdev, device);
2185
2186 /* Generic device initialization */
2187 init_waitqueue_head(&device->wait_queue);
2188
2189 kgsl_cffdump_open(device->id);
2190
2191 init_completion(&device->hwaccess_gate);
2192 init_completion(&device->suspend_gate);
2193
2194 ATOMIC_INIT_NOTIFIER_HEAD(&device->ts_notifier_list);
2195
2196 setup_timer(&device->idle_timer, kgsl_timer, (unsigned long) device);
2197 ret = kgsl_create_device_workqueue(device);
2198 if (ret)
2199 goto err_devlist;
2200
2201 INIT_WORK(&device->idle_check_ws, kgsl_idle_check);
Jordan Crouse1bf80aa2011-10-12 16:57:47 -06002202 INIT_WORK(&device->ts_expired_ws, kgsl_timestamp_expired);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002203
2204 INIT_LIST_HEAD(&device->memqueue);
Jordan Croused4bc9d22011-11-17 13:39:21 -07002205 INIT_LIST_HEAD(&device->events);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002206
2207 ret = kgsl_mmu_init(device);
2208 if (ret != 0)
2209 goto err_dest_work_q;
2210
2211 ret = kgsl_allocate_contiguous(&device->memstore,
2212 sizeof(struct kgsl_devmemstore));
2213
2214 if (ret != 0)
2215 goto err_close_mmu;
2216
Lucille Sylvesteref44e7332011-11-02 13:21:17 -07002217 if (cpu_is_msm8x60())
2218 wake_lock_init(&device->idle_wakelock,
2219 WAKE_LOCK_IDLE, device->name);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002220
2221 idr_init(&device->context_idr);
2222
2223 /* sysfs and debugfs initalization - failure here is non fatal */
2224
2225 /* Initialize logging */
2226 kgsl_device_debugfs_init(device);
2227
2228 /* Initialize common sysfs entries */
2229 kgsl_pwrctrl_init_sysfs(device);
2230
2231 return 0;
2232
2233err_close_mmu:
2234 kgsl_mmu_close(device);
2235err_dest_work_q:
2236 destroy_workqueue(device->work_queue);
2237 device->work_queue = NULL;
2238err_devlist:
2239 mutex_lock(&kgsl_driver.devlock);
2240 kgsl_driver.devp[minor] = NULL;
2241 mutex_unlock(&kgsl_driver.devlock);
2242
2243 return ret;
2244}
2245EXPORT_SYMBOL(kgsl_register_device);
2246
2247int kgsl_device_platform_probe(struct kgsl_device *device,
2248 irqreturn_t (*dev_isr) (int, void*))
2249{
2250 int status = -EINVAL;
2251 struct kgsl_memregion *regspace = NULL;
2252 struct resource *res;
2253 struct platform_device *pdev =
2254 container_of(device->parentdev, struct platform_device, dev);
2255
2256 pm_runtime_enable(device->parentdev);
2257
2258 status = kgsl_pwrctrl_init(device);
2259 if (status)
2260 goto error;
2261
2262 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2263 device->iomemname);
2264 if (res == NULL) {
2265 KGSL_DRV_ERR(device, "platform_get_resource_byname failed\n");
2266 status = -EINVAL;
2267 goto error_pwrctrl_close;
2268 }
2269 if (res->start == 0 || resource_size(res) == 0) {
2270 KGSL_DRV_ERR(device, "dev %d invalid regspace\n", device->id);
2271 status = -EINVAL;
2272 goto error_pwrctrl_close;
2273 }
2274
2275 regspace = &device->regspace;
2276 regspace->mmio_phys_base = res->start;
2277 regspace->sizebytes = resource_size(res);
2278
2279 if (!request_mem_region(regspace->mmio_phys_base,
2280 regspace->sizebytes, device->name)) {
2281 KGSL_DRV_ERR(device, "request_mem_region failed\n");
2282 status = -ENODEV;
2283 goto error_pwrctrl_close;
2284 }
2285
2286 regspace->mmio_virt_base = ioremap(regspace->mmio_phys_base,
2287 regspace->sizebytes);
2288
2289 if (regspace->mmio_virt_base == NULL) {
2290 KGSL_DRV_ERR(device, "ioremap failed\n");
2291 status = -ENODEV;
2292 goto error_release_mem;
2293 }
2294
2295 status = request_irq(device->pwrctrl.interrupt_num, dev_isr,
2296 IRQF_TRIGGER_HIGH, device->name, device);
2297 if (status) {
2298 KGSL_DRV_ERR(device, "request_irq(%d) failed: %d\n",
2299 device->pwrctrl.interrupt_num, status);
2300 goto error_iounmap;
2301 }
2302 device->pwrctrl.have_irq = 1;
2303 disable_irq(device->pwrctrl.interrupt_num);
2304
2305 KGSL_DRV_INFO(device,
2306 "dev_id %d regs phys 0x%08x size 0x%08x virt %p\n",
2307 device->id, regspace->mmio_phys_base,
2308 regspace->sizebytes, regspace->mmio_virt_base);
2309
2310
2311 status = kgsl_register_device(device);
2312 if (!status)
2313 return status;
2314
2315 free_irq(device->pwrctrl.interrupt_num, NULL);
2316 device->pwrctrl.have_irq = 0;
2317error_iounmap:
2318 iounmap(regspace->mmio_virt_base);
2319 regspace->mmio_virt_base = NULL;
2320error_release_mem:
2321 release_mem_region(regspace->mmio_phys_base, regspace->sizebytes);
2322error_pwrctrl_close:
2323 kgsl_pwrctrl_close(device);
2324error:
2325 return status;
2326}
2327EXPORT_SYMBOL(kgsl_device_platform_probe);
2328
2329void kgsl_device_platform_remove(struct kgsl_device *device)
2330{
2331 struct kgsl_memregion *regspace = &device->regspace;
2332
2333 kgsl_unregister_device(device);
2334
2335 if (regspace->mmio_virt_base != NULL) {
2336 iounmap(regspace->mmio_virt_base);
2337 regspace->mmio_virt_base = NULL;
2338 release_mem_region(regspace->mmio_phys_base,
2339 regspace->sizebytes);
2340 }
2341 kgsl_pwrctrl_close(device);
2342
2343 pm_runtime_disable(device->parentdev);
2344}
2345EXPORT_SYMBOL(kgsl_device_platform_remove);
2346
2347static int __devinit
2348kgsl_ptdata_init(void)
2349{
Shubhraprakash Das767fdda2011-08-15 15:49:45 -06002350 kgsl_driver.ptpool = kgsl_mmu_ptpool_init(KGSL_PAGETABLE_SIZE,
2351 kgsl_pagetable_count);
2352 if (!kgsl_driver.ptpool)
2353 return -ENOMEM;
2354 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002355}
2356
2357static void kgsl_core_exit(void)
2358{
2359 unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX);
2360
Shubhraprakash Das767fdda2011-08-15 15:49:45 -06002361 kgsl_mmu_ptpool_destroy(&kgsl_driver.ptpool);
2362 kgsl_driver.ptpool = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002363
2364 device_unregister(&kgsl_driver.virtdev);
2365
2366 if (kgsl_driver.class) {
2367 class_destroy(kgsl_driver.class);
2368 kgsl_driver.class = NULL;
2369 }
2370
2371 kgsl_drm_exit();
2372 kgsl_cffdump_destroy();
Jordan Croused8f1c6b2011-10-04 09:31:29 -06002373 kgsl_core_debugfs_close();
2374 kgsl_sharedmem_uninit_sysfs();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002375}
2376
2377static int __init kgsl_core_init(void)
2378{
2379 int result = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002380 /* alloc major and minor device numbers */
2381 result = alloc_chrdev_region(&kgsl_driver.major, 0, KGSL_DEVICE_MAX,
2382 KGSL_NAME);
2383 if (result < 0) {
2384 KGSL_CORE_ERR("alloc_chrdev_region failed err = %d\n", result);
2385 goto err;
2386 }
2387
2388 cdev_init(&kgsl_driver.cdev, &kgsl_fops);
2389 kgsl_driver.cdev.owner = THIS_MODULE;
2390 kgsl_driver.cdev.ops = &kgsl_fops;
2391 result = cdev_add(&kgsl_driver.cdev, MKDEV(MAJOR(kgsl_driver.major), 0),
2392 KGSL_DEVICE_MAX);
2393
2394 if (result) {
2395 KGSL_CORE_ERR("kgsl: cdev_add() failed, dev_num= %d,"
2396 " result= %d\n", kgsl_driver.major, result);
2397 goto err;
2398 }
2399
2400 kgsl_driver.class = class_create(THIS_MODULE, KGSL_NAME);
2401
2402 if (IS_ERR(kgsl_driver.class)) {
2403 result = PTR_ERR(kgsl_driver.class);
2404 KGSL_CORE_ERR("failed to create class %s", KGSL_NAME);
2405 goto err;
2406 }
2407
2408 /* Make a virtual device for managing core related things
2409 in sysfs */
2410 kgsl_driver.virtdev.class = kgsl_driver.class;
2411 dev_set_name(&kgsl_driver.virtdev, "kgsl");
2412 result = device_register(&kgsl_driver.virtdev);
2413 if (result) {
2414 KGSL_CORE_ERR("driver_register failed\n");
2415 goto err;
2416 }
2417
2418 /* Make kobjects in the virtual device for storing statistics */
2419
2420 kgsl_driver.ptkobj =
2421 kobject_create_and_add("pagetables",
2422 &kgsl_driver.virtdev.kobj);
2423
2424 kgsl_driver.prockobj =
2425 kobject_create_and_add("proc",
2426 &kgsl_driver.virtdev.kobj);
2427
2428 kgsl_core_debugfs_init();
2429
2430 kgsl_sharedmem_init_sysfs();
2431 kgsl_cffdump_init();
2432
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002433 INIT_LIST_HEAD(&kgsl_driver.process_list);
2434
Shubhraprakash Das767fdda2011-08-15 15:49:45 -06002435 INIT_LIST_HEAD(&kgsl_driver.pagetable_list);
2436
2437 kgsl_mmu_set_mmutype(ksgl_mmu_type);
2438
2439 if (KGSL_MMU_TYPE_GPU == kgsl_mmu_get_mmutype()) {
2440 result = kgsl_ptdata_init();
2441 if (result)
2442 goto err;
2443 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002444
2445 result = kgsl_drm_init(NULL);
2446
2447 if (result)
2448 goto err;
2449
Shubhraprakash Das767fdda2011-08-15 15:49:45 -06002450 kgsl_mmu_set_mmutype(ksgl_mmu_type);
2451
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002452 return 0;
2453
2454err:
2455 kgsl_core_exit();
2456 return result;
2457}
2458
/* Module entry/exit points and metadata */
module_init(kgsl_core_init);
module_exit(kgsl_core_exit);

MODULE_AUTHOR("Qualcomm Innovation Center, Inc.");
MODULE_DESCRIPTION("MSM GPU driver");
MODULE_LICENSE("GPL");