blob: b7356ac44ffa8cbcba4cb012da54ec84c1ea066d [file] [log] [blame]
Tarun Karraf8e5cd22012-01-09 14:10:09 -07001/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/fb.h>
14#include <linux/file.h>
15#include <linux/fs.h>
16#include <linux/debugfs.h>
17#include <linux/uaccess.h>
18#include <linux/interrupt.h>
19#include <linux/workqueue.h>
20#include <linux/android_pmem.h>
21#include <linux/vmalloc.h>
22#include <linux/pm_runtime.h>
Jordan Croused4bc9d22011-11-17 13:39:21 -070023#include <linux/genlock.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070024
25#include <linux/ashmem.h>
26#include <linux/major.h>
Jordan Crouse8eab35a2011-10-12 16:57:48 -060027#include <linux/ion.h>
Lucille Sylvesteref44e7332011-11-02 13:21:17 -070028#include <mach/socinfo.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070029
30#include "kgsl.h"
31#include "kgsl_debugfs.h"
32#include "kgsl_cffdump.h"
33#include "kgsl_log.h"
34#include "kgsl_sharedmem.h"
35#include "kgsl_device.h"
Norman Geed7402ff2011-10-28 08:51:11 -060036#include "kgsl_trace.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070037
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "kgsl."

/* Minimum number of pagetables preallocated at init; tunable as kgsl.ptcount */
static int kgsl_pagetable_count = KGSL_PAGETABLE_COUNT;
/* MMU backend selection string; user-visible parameter name is kgsl.mmutype.
 * NOTE(review): "ksgl" looks like a typo for "kgsl" -- renaming is only safe
 * if every file-local use is updated; verify against the rest of the file. */
static char *ksgl_mmu_type;
module_param_named(ptcount, kgsl_pagetable_count, int, 0);
MODULE_PARM_DESC(kgsl_pagetable_count,
"Minimum number of pagetables for KGSL to allocate at initialization time");
module_param_named(mmutype, ksgl_mmu_type, charp, 0);
MODULE_PARM_DESC(ksgl_mmu_type,
"Type of MMU to be used for graphics. Valid values are 'iommu' or 'gpummu' or 'nommu'");

/* Global ION client used for all ION buffer imports into the GPU */
static struct ion_client *kgsl_ion_client;
51
/**
 * kgsl_add_event - Add a new timestamp event for the KGSL device
 * @device - KGSL device for the new event
 * @ts - the timestamp to trigger the event on
 * @cb - callback function to call when the timestamp expires
 * @priv - private data for the specific event type
 * @owner - driver instance that owns this event
 *
 * @returns - 0 on success or error code on failure
 */

static int kgsl_add_event(struct kgsl_device *device, u32 ts,
	void (*cb)(struct kgsl_device *, void *, u32), void *priv,
	struct kgsl_device_private *owner)
{
	struct kgsl_event *event;
	struct list_head *n;
	/* Most recently retired timestamp; used to detect already-expired ts */
	unsigned int cur = device->ftbl->readtimestamp(device,
		KGSL_TIMESTAMP_RETIRED);

	if (cb == NULL)
		return -EINVAL;

	/* Check to see if the requested timestamp has already fired */

	if (timestamp_cmp(cur, ts) >= 0) {
		/* Run the callback inline; no event struct is queued */
		cb(device, priv, cur);
		return 0;
	}

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (event == NULL)
		return -ENOMEM;

	event->timestamp = ts;
	event->priv = priv;
	event->func = cb;
	event->owner = owner;

	/* Add the event in order to the list (kept sorted by timestamp) */

	for (n = device->events.next ; n != &device->events; n = n->next) {
		struct kgsl_event *e =
			list_entry(n, struct kgsl_event, list);

		/* Insert before the first event with a later timestamp */
		if (timestamp_cmp(e->timestamp, ts) > 0) {
			list_add(&event->list, n->prev);
			break;
		}
	}

	/* Nothing later in the list - append at the tail */
	if (n == &device->events)
		list_add_tail(&event->list, &device->events);

	/* Kick the worker so expired events are processed promptly */
	queue_work(device->work_queue, &device->ts_expired_ws);
	return 0;
}
Jordan Croused4bc9d22011-11-17 13:39:21 -0700109
/**
 * kgsl_cancel_events - Cancel all events for a process
 * @device - KGSL device for the events to cancel
 * @owner - driver instance that owns the events to cancel
 *
 */
static void kgsl_cancel_events(struct kgsl_device *device,
	struct kgsl_device_private *owner)
{
	struct kgsl_event *event, *event_tmp;
	/* Current retired timestamp, passed to each cancelled callback */
	unsigned int cur = device->ftbl->readtimestamp(device,
		KGSL_TIMESTAMP_RETIRED);

	/* _safe variant because entries are deleted while iterating */
	list_for_each_entry_safe(event, event_tmp, &device->events, list) {
		if (event->owner != owner)
			continue;
		/*
		 * "cancel" the events by calling their callback.
		 * Currently, events are used for lock and memory
		 * management, so if the process is dying the right
		 * thing to do is release or free.
		 */
		if (event->func)
			event->func(device, event->priv, cur);

		list_del(&event->list);
		kfree(event);
	}
}
139
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700140static inline struct kgsl_mem_entry *
141kgsl_mem_entry_create(void)
142{
143 struct kgsl_mem_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
144
145 if (!entry)
146 KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*entry));
147 else
148 kref_init(&entry->refcount);
149
150 return entry;
151}
152
/*
 * kref release callback for a memory entry: roll back the per-process
 * and driver-wide statistics, free the shared memory, then drop the
 * allocator-specific reference (file for PMEM/ashmem, handle for ION).
 */
void
kgsl_mem_entry_destroy(struct kref *kref)
{
	struct kgsl_mem_entry *entry = container_of(kref,
						    struct kgsl_mem_entry,
						    refcount);

	entry->priv->stats[entry->memtype].cur -= entry->memdesc.size;

	/* Kernel allocations are not counted in the global mapped stat */
	if (entry->memtype != KGSL_MEM_ENTRY_KERNEL)
		kgsl_driver.stats.mapped -= entry->memdesc.size;

	/*
	 * Ion takes care of freeing the sglist for us (how nice </sarcasm>) so
	 * unmap the dma before freeing the sharedmem so kgsl_sharedmem_free
	 * doesn't try to free it again
	 */

	if (entry->memtype == KGSL_MEM_ENTRY_ION) {
		ion_unmap_dma(kgsl_ion_client, entry->priv_data);
		entry->memdesc.sg = NULL;
	}

	kgsl_sharedmem_free(&entry->memdesc);

	switch (entry->memtype) {
	case KGSL_MEM_ENTRY_PMEM:
	case KGSL_MEM_ENTRY_ASHMEM:
		/* Release the file reference taken when the buffer was mapped */
		if (entry->priv_data)
			fput(entry->priv_data);
		break;
	case KGSL_MEM_ENTRY_ION:
		ion_free(kgsl_ion_client, entry->priv_data);
		break;
	}

	kfree(entry);
}
EXPORT_SYMBOL(kgsl_mem_entry_destroy);
192
193static
194void kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry,
195 struct kgsl_process_private *process)
196{
197 spin_lock(&process->mem_lock);
198 list_add(&entry->list, &process->mem_list);
199 spin_unlock(&process->mem_lock);
200
201 entry->priv = process;
202}
203
204/* Allocate a new context id */
205
206static struct kgsl_context *
207kgsl_create_context(struct kgsl_device_private *dev_priv)
208{
209 struct kgsl_context *context;
210 int ret, id;
211
212 context = kzalloc(sizeof(*context), GFP_KERNEL);
213
214 if (context == NULL)
215 return NULL;
216
217 while (1) {
218 if (idr_pre_get(&dev_priv->device->context_idr,
219 GFP_KERNEL) == 0) {
220 kfree(context);
221 return NULL;
222 }
223
224 ret = idr_get_new(&dev_priv->device->context_idr,
225 context, &id);
226
227 if (ret != -EAGAIN)
228 break;
229 }
230
231 if (ret) {
232 kfree(context);
233 return NULL;
234 }
235
236 context->id = id;
237 context->dev_priv = dev_priv;
238
239 return context;
240}
241
242static void
243kgsl_destroy_context(struct kgsl_device_private *dev_priv,
244 struct kgsl_context *context)
245{
246 int id;
247
248 if (context == NULL)
249 return;
250
251 /* Fire a bug if the devctxt hasn't been freed */
252 BUG_ON(context->devctxt);
253
254 id = context->id;
255 kfree(context);
256
257 idr_remove(&dev_priv->device->context_idr, id);
258}
259
/*
 * Work handler run from the device workqueue whenever new timestamps
 * may have retired: fires and frees every queued event whose timestamp
 * has been reached.  The event list is kept sorted by kgsl_add_event,
 * so iteration can stop at the first unexpired entry.
 */
static void kgsl_timestamp_expired(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work, struct kgsl_device,
		ts_expired_ws);
	struct kgsl_event *event, *event_tmp;
	uint32_t ts_processed;

	mutex_lock(&device->mutex);

	/* get current EOP timestamp */
	ts_processed = device->ftbl->readtimestamp(device,
		KGSL_TIMESTAMP_RETIRED);

	/* Process expired events */
	list_for_each_entry_safe(event, event_tmp, &device->events, list) {
		if (timestamp_cmp(ts_processed, event->timestamp) < 0)
			break;

		if (event->func)
			event->func(device, event->priv, ts_processed);

		list_del(&event->list);
		kfree(event);
	}

	mutex_unlock(&device->mutex);
}
287
288static void kgsl_check_idle_locked(struct kgsl_device *device)
289{
290 if (device->pwrctrl.nap_allowed == true &&
291 device->state == KGSL_STATE_ACTIVE &&
292 device->requested_state == KGSL_STATE_NONE) {
Jeremy Gebben388c2972011-12-16 09:05:07 -0700293 kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700294 if (kgsl_pwrctrl_sleep(device) != 0)
295 mod_timer(&device->idle_timer,
296 jiffies +
297 device->pwrctrl.interval_timeout);
298 }
299}
300
/* Take the device mutex and run the locked idle check */
static void kgsl_check_idle(struct kgsl_device *device)
{
	mutex_lock(&device->mutex);
	kgsl_check_idle_locked(device);
	mutex_unlock(&device->mutex);
}
307
308struct kgsl_device *kgsl_get_device(int dev_idx)
309{
310 int i;
311 struct kgsl_device *ret = NULL;
312
313 mutex_lock(&kgsl_driver.devlock);
314
315 for (i = 0; i < KGSL_DEVICE_MAX; i++) {
316 if (kgsl_driver.devp[i] && kgsl_driver.devp[i]->id == dev_idx) {
317 ret = kgsl_driver.devp[i];
318 break;
319 }
320 }
321
322 mutex_unlock(&kgsl_driver.devlock);
323 return ret;
324}
325EXPORT_SYMBOL(kgsl_get_device);
326
327static struct kgsl_device *kgsl_get_minor(int minor)
328{
329 struct kgsl_device *ret = NULL;
330
331 if (minor < 0 || minor >= KGSL_DEVICE_MAX)
332 return NULL;
333
334 mutex_lock(&kgsl_driver.devlock);
335 ret = kgsl_driver.devp[minor];
336 mutex_unlock(&kgsl_driver.devlock);
337
338 return ret;
339}
340
341int kgsl_register_ts_notifier(struct kgsl_device *device,
342 struct notifier_block *nb)
343{
344 BUG_ON(device == NULL);
345 return atomic_notifier_chain_register(&device->ts_notifier_list,
346 nb);
347}
348EXPORT_SYMBOL(kgsl_register_ts_notifier);
349
350int kgsl_unregister_ts_notifier(struct kgsl_device *device,
351 struct notifier_block *nb)
352{
353 BUG_ON(device == NULL);
354 return atomic_notifier_chain_unregister(&device->ts_notifier_list,
355 nb);
356}
357EXPORT_SYMBOL(kgsl_unregister_ts_notifier);
358
359int kgsl_check_timestamp(struct kgsl_device *device, unsigned int timestamp)
360{
361 unsigned int ts_processed;
362
363 ts_processed = device->ftbl->readtimestamp(device,
364 KGSL_TIMESTAMP_RETIRED);
365
Jordan Crousee6239dd2011-11-17 13:39:21 -0700366 return (timestamp_cmp(ts_processed, timestamp) >= 0);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700367}
368EXPORT_SYMBOL(kgsl_check_timestamp);
369
/*
 * Power a device down for system suspend: wait for in-flight user
 * waits to drain, idle/stop the hardware and move the device to
 * KGSL_STATE_SUSPEND.  Returns 0 on success or -EINVAL if the device
 * is missing or in a state that cannot be suspended.
 */
static int kgsl_suspend_device(struct kgsl_device *device, pm_message_t state)
{
	int status = -EINVAL;
	unsigned int nap_allowed_saved;
	struct kgsl_pwrscale_policy *policy_saved;

	if (!device)
		return -EINVAL;

	KGSL_PWR_WARN(device, "suspend start\n");

	mutex_lock(&device->mutex);
	/* Disable napping and power scaling for the duration of suspend;
	 * both are restored on the success path below */
	nap_allowed_saved = device->pwrctrl.nap_allowed;
	device->pwrctrl.nap_allowed = false;
	policy_saved = device->pwrscale.policy;
	device->pwrscale.policy = NULL;
	kgsl_pwrctrl_request_state(device, KGSL_STATE_SUSPEND);
	/* Make sure no user process is waiting for a timestamp *
	 * before suspending */
	if (device->active_cnt != 0) {
		mutex_unlock(&device->mutex);
		wait_for_completion(&device->suspend_gate);
		mutex_lock(&device->mutex);
	}
	/* Don't let the timer wake us during suspended sleep. */
	del_timer_sync(&device->idle_timer);
	switch (device->state) {
	case KGSL_STATE_INIT:
		break;
	case KGSL_STATE_ACTIVE:
		/* Wait for the device to become idle */
		device->ftbl->idle(device, KGSL_TIMEOUT_DEFAULT);
		/* fallthrough - an idled device suspends like a napping one */
	case KGSL_STATE_NAP:
	case KGSL_STATE_SLEEP:
		/* Get the completion ready to be waited upon. */
		INIT_COMPLETION(device->hwaccess_gate);
		device->ftbl->suspend_context(device);
		device->ftbl->stop(device);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SUSPEND);
		break;
	case KGSL_STATE_SLUMBER:
		/* Hardware is already down; just gate hardware access */
		INIT_COMPLETION(device->hwaccess_gate);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SUSPEND);
		break;
	default:
		KGSL_PWR_ERR(device, "suspend fail, device %d\n",
			device->id);
		goto end;
	}
	kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
	/* Restore the pre-suspend nap/scaling configuration */
	device->pwrctrl.nap_allowed = nap_allowed_saved;
	device->pwrscale.policy = policy_saved;
	status = 0;

end:
	mutex_unlock(&device->mutex);
	KGSL_PWR_WARN(device, "suspend end\n");
	return status;
}
429
/*
 * Bring a device out of system suspend: move SUSPEND -> SLUMBER and
 * open the hwaccess gate so blocked callers can proceed.  Returns 0 if
 * the device was actually suspended, -EINVAL otherwise.
 */
static int kgsl_resume_device(struct kgsl_device *device)
{
	int status = -EINVAL;

	if (!device)
		return -EINVAL;

	KGSL_PWR_WARN(device, "resume start\n");
	mutex_lock(&device->mutex);
	if (device->state == KGSL_STATE_SUSPEND) {
		/* Resume only to the low-power slumber state; the full
		 * power-up happens lazily on the next GPU request */
		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
		status = 0;
		complete_all(&device->hwaccess_gate);
	}
	kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);

	mutex_unlock(&device->mutex);
	KGSL_PWR_WARN(device, "resume end\n");
	return status;
}
450
/* dev_pm_ops .suspend callback: forward to the shared suspend path */
static int kgsl_suspend(struct device *dev)
{

	pm_message_t arg = {0};
	struct kgsl_device *device = dev_get_drvdata(dev);
	return kgsl_suspend_device(device, arg);
}
458
/* dev_pm_ops .resume callback: forward to the shared resume path */
static int kgsl_resume(struct device *dev)
{
	struct kgsl_device *device = dev_get_drvdata(dev);
	return kgsl_resume_device(device);
}
464
/* Runtime PM suspend: device power is managed explicitly by the driver,
 * so this is a deliberate no-op that satisfies the runtime PM core. */
static int kgsl_runtime_suspend(struct device *dev)
{
	return 0;
}
469
/* Runtime PM resume: deliberate no-op, see kgsl_runtime_suspend() */
static int kgsl_runtime_resume(struct device *dev)
{
	return 0;
}
474
/* System and runtime PM callback table exported to the bus driver */
const struct dev_pm_ops kgsl_pm_ops = {
	.suspend = kgsl_suspend,
	.resume = kgsl_resume,
	.runtime_suspend = kgsl_runtime_suspend,
	.runtime_resume = kgsl_runtime_resume,
};
EXPORT_SYMBOL(kgsl_pm_ops);
482
/* early_suspend hook: push the device toward SLUMBER when the display
 * turns off */
void kgsl_early_suspend_driver(struct early_suspend *h)
{
	struct kgsl_device *device = container_of(h,
					struct kgsl_device, display_off);
	KGSL_PWR_WARN(device, "early suspend start\n");
	mutex_lock(&device->mutex);
	kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
	kgsl_pwrctrl_sleep(device);
	mutex_unlock(&device->mutex);
	KGSL_PWR_WARN(device, "early suspend end\n");
}
EXPORT_SYMBOL(kgsl_early_suspend_driver);
495
/* Platform-driver suspend entry point: forward to the shared path */
int kgsl_suspend_driver(struct platform_device *pdev,
			pm_message_t state)
{
	struct kgsl_device *device = dev_get_drvdata(&pdev->dev);
	return kgsl_suspend_device(device, state);
}
EXPORT_SYMBOL(kgsl_suspend_driver);
503
/* Platform-driver resume entry point: forward to the shared path */
int kgsl_resume_driver(struct platform_device *pdev)
{
	struct kgsl_device *device = dev_get_drvdata(&pdev->dev);
	return kgsl_resume_device(device);
}
EXPORT_SYMBOL(kgsl_resume_driver);
510
/* late_resume hook: wake the device when the display turns back on and
 * jump straight to the highest power level for a responsive UI */
void kgsl_late_resume_driver(struct early_suspend *h)
{
	struct kgsl_device *device = container_of(h,
					struct kgsl_device, display_off);
	KGSL_PWR_WARN(device, "late resume start\n");
	mutex_lock(&device->mutex);
	kgsl_pwrctrl_wake(device);
	device->pwrctrl.restore_slumber = 0;
	kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_TURBO);
	mutex_unlock(&device->mutex);
	/* May immediately nap again if the device is idle */
	kgsl_check_idle(device);
	KGSL_PWR_WARN(device, "late resume end\n");
}
EXPORT_SYMBOL(kgsl_late_resume_driver);
525
526/* file operations */
527static struct kgsl_process_private *
528kgsl_get_process_private(struct kgsl_device_private *cur_dev_priv)
529{
530 struct kgsl_process_private *private;
531
532 mutex_lock(&kgsl_driver.process_mutex);
533 list_for_each_entry(private, &kgsl_driver.process_list, list) {
534 if (private->pid == task_tgid_nr(current)) {
535 private->refcnt++;
536 goto out;
537 }
538 }
539
540 /* no existing process private found for this dev_priv, create one */
541 private = kzalloc(sizeof(struct kgsl_process_private), GFP_KERNEL);
542 if (private == NULL) {
543 KGSL_DRV_ERR(cur_dev_priv->device, "kzalloc(%d) failed\n",
544 sizeof(struct kgsl_process_private));
545 goto out;
546 }
547
548 spin_lock_init(&private->mem_lock);
549 private->refcnt = 1;
550 private->pid = task_tgid_nr(current);
551
552 INIT_LIST_HEAD(&private->mem_list);
553
Shubhraprakash Das767fdda2011-08-15 15:49:45 -0600554 if (kgsl_mmu_enabled())
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700555 {
556 unsigned long pt_name;
557
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700558 pt_name = task_tgid_nr(current);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700559 private->pagetable = kgsl_mmu_getpagetable(pt_name);
560 if (private->pagetable == NULL) {
561 kfree(private);
562 private = NULL;
563 goto out;
564 }
565 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700566
567 list_add(&private->list, &kgsl_driver.process_list);
568
569 kgsl_process_init_sysfs(private);
570
571out:
572 mutex_unlock(&kgsl_driver.process_mutex);
573 return private;
574}
575
/*
 * Drop a reference on a process-private struct; on the last reference
 * tear down its sysfs entries, release every memory entry still on the
 * process list and free the pagetable.
 */
static void
kgsl_put_process_private(struct kgsl_device *device,
			 struct kgsl_process_private *private)
{
	struct kgsl_mem_entry *entry = NULL;
	struct kgsl_mem_entry *entry_tmp = NULL;

	if (!private)
		return;

	mutex_lock(&kgsl_driver.process_mutex);

	if (--private->refcnt)
		goto unlock;

	kgsl_process_uninit_sysfs(private);

	list_del(&private->list);

	/* Drop the list reference on every remaining allocation; the
	 * entry is freed via kref when the last reference goes away */
	list_for_each_entry_safe(entry, entry_tmp, &private->mem_list, list) {
		list_del(&entry->list);
		kgsl_mem_entry_put(entry);
	}

	kgsl_mmu_putpagetable(private->pagetable);
	kfree(private);
unlock:
	mutex_unlock(&kgsl_driver.process_mutex);
}
605
/*
 * Release handler for the kgsl char device: destroys every context
 * created through this fd, stops the hardware on last close, cancels
 * pending events owned by this instance and drops the process-private
 * and runtime PM references.
 */
static int kgsl_release(struct inode *inodep, struct file *filep)
{
	int result = 0;
	struct kgsl_device_private *dev_priv = filep->private_data;
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_device *device = dev_priv->device;
	struct kgsl_context *context;
	int next = 0;

	filep->private_data = NULL;

	mutex_lock(&device->mutex);
	kgsl_check_suspended(device);

	/* Walk the context idr and destroy each context this fd created */
	while (1) {
		context = idr_get_next(&device->context_idr, &next);
		if (context == NULL)
			break;

		if (context->dev_priv == dev_priv) {
			device->ftbl->drawctxt_destroy(device, context);
			kgsl_destroy_context(dev_priv, context);
		}

		next = next + 1;
	}

	device->open_count--;
	if (device->open_count == 0) {
		/* Last opener: stop the hardware entirely */
		result = device->ftbl->stop(device);
		kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
	}
	/* clean up any to-be-freed entries that belong to this
	 * process and this device
	 */
	kgsl_cancel_events(device, dev_priv);

	mutex_unlock(&device->mutex);
	kfree(dev_priv);

	kgsl_put_process_private(device, private);

	pm_runtime_put(device->parentdev);
	return result;
}
651
652static int kgsl_open(struct inode *inodep, struct file *filep)
653{
654 int result;
655 struct kgsl_device_private *dev_priv;
656 struct kgsl_device *device;
657 unsigned int minor = iminor(inodep);
658
659 device = kgsl_get_minor(minor);
660 BUG_ON(device == NULL);
661
662 if (filep->f_flags & O_EXCL) {
663 KGSL_DRV_ERR(device, "O_EXCL not allowed\n");
664 return -EBUSY;
665 }
666
667 result = pm_runtime_get_sync(device->parentdev);
668 if (result < 0) {
669 KGSL_DRV_ERR(device,
670 "Runtime PM: Unable to wake up the device, rc = %d\n",
671 result);
672 return result;
673 }
674 result = 0;
675
676 dev_priv = kzalloc(sizeof(struct kgsl_device_private), GFP_KERNEL);
677 if (dev_priv == NULL) {
678 KGSL_DRV_ERR(device, "kzalloc failed(%d)\n",
679 sizeof(struct kgsl_device_private));
680 result = -ENOMEM;
681 goto err_pmruntime;
682 }
683
684 dev_priv->device = device;
685 filep->private_data = dev_priv;
686
687 /* Get file (per process) private struct */
688 dev_priv->process_priv = kgsl_get_process_private(dev_priv);
689 if (dev_priv->process_priv == NULL) {
690 result = -ENOMEM;
691 goto err_freedevpriv;
692 }
693
694 mutex_lock(&device->mutex);
695 kgsl_check_suspended(device);
696
697 if (device->open_count == 0) {
698 result = device->ftbl->start(device, true);
699
700 if (result) {
701 mutex_unlock(&device->mutex);
702 goto err_putprocess;
703 }
Jeremy Gebben388c2972011-12-16 09:05:07 -0700704 kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700705 }
706 device->open_count++;
707 mutex_unlock(&device->mutex);
708
709 KGSL_DRV_INFO(device, "Initialized %s: mmu=%s pagetable_count=%d\n",
710 device->name, kgsl_mmu_enabled() ? "on" : "off",
711 kgsl_pagetable_count);
712
713 return result;
714
715err_putprocess:
716 kgsl_put_process_private(device, dev_priv->process_priv);
717err_freedevpriv:
718 filep->private_data = NULL;
719 kfree(dev_priv);
720err_pmruntime:
721 pm_runtime_put(device->parentdev);
722 return result;
723}
724
725
726/*call with private->mem_lock locked */
727static struct kgsl_mem_entry *
728kgsl_sharedmem_find(struct kgsl_process_private *private, unsigned int gpuaddr)
729{
730 struct kgsl_mem_entry *entry = NULL, *result = NULL;
731
732 BUG_ON(private == NULL);
733
734 gpuaddr &= PAGE_MASK;
735
736 list_for_each_entry(entry, &private->mem_list, list) {
737 if (entry->memdesc.gpuaddr == gpuaddr) {
738 result = entry;
739 break;
740 }
741 }
742 return result;
743}
744
745/*call with private->mem_lock locked */
746struct kgsl_mem_entry *
747kgsl_sharedmem_find_region(struct kgsl_process_private *private,
748 unsigned int gpuaddr,
749 size_t size)
750{
751 struct kgsl_mem_entry *entry = NULL, *result = NULL;
752
753 BUG_ON(private == NULL);
754
755 list_for_each_entry(entry, &private->mem_list, list) {
Jeremy Gebben16e80fa2011-11-30 15:56:29 -0700756 if (kgsl_gpuaddr_in_memdesc(&entry->memdesc, gpuaddr, size)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700757 result = entry;
758 break;
759 }
760 }
761
762 return result;
763}
764EXPORT_SYMBOL(kgsl_sharedmem_find_region);
765
/*call all ioctl sub functions with driver locked*/
/*
 * KGSL_IOCTL_DEVICE_GETPROPERTY: copy a driver/device property to the
 * user buffer.  VERSION and GPU_RESET_STAT are handled here; every
 * other property type is forwarded to the device-specific hook.
 */
static long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv,
					unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_device_getproperty *param = data;

	switch (param->type) {
	case KGSL_PROP_VERSION:
	{
		struct kgsl_version version;
		/* Reject mismatched user buffer sizes */
		if (param->sizebytes != sizeof(version)) {
			result = -EINVAL;
			break;
		}

		version.drv_major = KGSL_VERSION_MAJOR;
		version.drv_minor = KGSL_VERSION_MINOR;
		version.dev_major = dev_priv->device->ver_major;
		version.dev_minor = dev_priv->device->ver_minor;

		if (copy_to_user(param->value, &version, sizeof(version)))
			result = -EFAULT;

		break;
	}
	case KGSL_PROP_GPU_RESET_STAT:
	{
		/* Return reset status of given context and clear it */
		uint32_t id;
		struct kgsl_context *context;

		if (param->sizebytes != sizeof(unsigned int)) {
			result = -EINVAL;
			break;
		}
		/* We expect the value passed in to contain the context id */
		if (copy_from_user(&id, param->value,
			sizeof(unsigned int))) {
			result = -EFAULT;
			break;
		}
		context = kgsl_find_context(dev_priv, id);
		if (!context) {
			result = -EINVAL;
			break;
		}
		/*
		 * Copy the reset status to value which also serves as
		 * the out parameter
		 */
		if (copy_to_user(param->value, &(context->reset_status),
			sizeof(unsigned int))) {
			result = -EFAULT;
			break;
		}
		/* Clear reset status once its been queried */
		context->reset_status = KGSL_CTX_STAT_NO_ERROR;
		break;
	}
	default:
		result = dev_priv->device->ftbl->getproperty(
					dev_priv->device, param->type,
					param->value, param->sizebytes);
	}


	return result;
}
835
/*
 * KGSL_IOCTL_DEVICE_WAITTIMESTAMP: block until the GPU retires the
 * requested timestamp or the timeout expires.  active_cnt is raised
 * across the wait so that a concurrent suspend waits for us to finish.
 */
static long kgsl_ioctl_device_waittimestamp(struct kgsl_device_private
					*dev_priv, unsigned int cmd,
					void *data)
{
	int result = 0;
	struct kgsl_device_waittimestamp *param = data;

	/* Set the active count so that suspend doesn't do the
	   wrong thing */

	dev_priv->device->active_cnt++;

	trace_kgsl_waittimestamp_entry(dev_priv->device, param);

	result = dev_priv->device->ftbl->waittimestamp(dev_priv->device,
					param->timestamp,
					param->timeout);

	trace_kgsl_waittimestamp_exit(dev_priv->device, result);

	/* Fire off any pending suspend operations that are in flight */

	INIT_COMPLETION(dev_priv->device->suspend_gate);
	dev_priv->device->active_cnt--;
	complete(&dev_priv->device->suspend_gate);

	return result;
}
864static bool check_ibdesc(struct kgsl_device_private *dev_priv,
865 struct kgsl_ibdesc *ibdesc, unsigned int numibs,
866 bool parse)
867{
868 bool result = true;
869 unsigned int i;
870 for (i = 0; i < numibs; i++) {
871 struct kgsl_mem_entry *entry;
872 spin_lock(&dev_priv->process_priv->mem_lock);
873 entry = kgsl_sharedmem_find_region(dev_priv->process_priv,
874 ibdesc[i].gpuaddr, ibdesc[i].sizedwords * sizeof(uint));
875 spin_unlock(&dev_priv->process_priv->mem_lock);
876 if (entry == NULL) {
877 KGSL_DRV_ERR(dev_priv->device,
878 "invalid cmd buffer gpuaddr %08x " \
879 "sizedwords %d\n", ibdesc[i].gpuaddr,
880 ibdesc[i].sizedwords);
881 result = false;
882 break;
883 }
884
885 if (parse && !kgsl_cffdump_parse_ibs(dev_priv, &entry->memdesc,
886 ibdesc[i].gpuaddr, ibdesc[i].sizedwords, true)) {
887 KGSL_DRV_ERR(dev_priv->device,
888 "invalid cmd buffer gpuaddr %08x " \
889 "sizedwords %d numibs %d/%d\n",
890 ibdesc[i].gpuaddr,
891 ibdesc[i].sizedwords, i+1, numibs);
892 result = false;
893 break;
894 }
895 }
896 return result;
897}
898
/*
 * KGSL_IOCTL_RINGBUFFER_ISSUEIBCMDS: validate and submit one or more
 * indirect buffers (IBs) to the ringbuffer for the given draw context.
 * Supports the IB-list mode and the legacy single-IB mode.
 */
static long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
				unsigned int cmd, void *data)
{
	int result = 0;
	struct kgsl_ringbuffer_issueibcmds *param = data;
	struct kgsl_ibdesc *ibdesc;
	struct kgsl_context *context;

#ifdef CONFIG_MSM_KGSL_DRM
	kgsl_gpu_mem_flush(DRM_KGSL_GEM_CACHE_OP_TO_DEV);
#endif

	context = kgsl_find_context(dev_priv, param->drawctxt_id);
	if (context == NULL) {
		result = -EINVAL;
		KGSL_DRV_ERR(dev_priv->device,
			"invalid drawctxt drawctxt_id %d\n",
			param->drawctxt_id);
		goto done;
	}

	if (param->flags & KGSL_CONTEXT_SUBMIT_IB_LIST) {
		KGSL_DRV_INFO(dev_priv->device,
			"Using IB list mode for ib submission, numibs: %d\n",
			param->numibs);
		if (!param->numibs) {
			KGSL_DRV_ERR(dev_priv->device,
				"Invalid numibs as parameter: %d\n",
				param->numibs);
			result = -EINVAL;
			goto done;
		}

		/* NOTE(review): sizeof yields size_t; the %d in the kzalloc
		 * error messages below should be %zu */
		ibdesc = kzalloc(sizeof(struct kgsl_ibdesc) * param->numibs,
					GFP_KERNEL);
		if (!ibdesc) {
			KGSL_MEM_ERR(dev_priv->device,
				"kzalloc(%d) failed\n",
				sizeof(struct kgsl_ibdesc) * param->numibs);
			result = -ENOMEM;
			goto done;
		}

		if (copy_from_user(ibdesc, (void *)param->ibdesc_addr,
				sizeof(struct kgsl_ibdesc) * param->numibs)) {
			result = -EFAULT;
			KGSL_DRV_ERR(dev_priv->device,
				"copy_from_user failed\n");
			goto free_ibdesc;
		}
	} else {
		KGSL_DRV_INFO(dev_priv->device,
			"Using single IB submission mode for ib submission\n");
		/* If user space driver is still using the old mode of
		 * submitting single ib then we need to support that as well */
		ibdesc = kzalloc(sizeof(struct kgsl_ibdesc), GFP_KERNEL);
		if (!ibdesc) {
			KGSL_MEM_ERR(dev_priv->device,
				"kzalloc(%d) failed\n",
				sizeof(struct kgsl_ibdesc));
			result = -ENOMEM;
			goto done;
		}
		/* Legacy mode: ibdesc_addr is the IB gpuaddr and numibs
		 * carries its size in dwords */
		ibdesc[0].gpuaddr = param->ibdesc_addr;
		ibdesc[0].sizedwords = param->numibs;
		param->numibs = 1;
	}

	/* Every IB must lie inside a mapped allocation of this process */
	if (!check_ibdesc(dev_priv, ibdesc, param->numibs, true)) {
		KGSL_DRV_ERR(dev_priv->device, "bad ibdesc");
		result = -EINVAL;
		goto free_ibdesc;
	}

	result = dev_priv->device->ftbl->issueibcmds(dev_priv,
					     context,
					     ibdesc,
					     param->numibs,
					     &param->timestamp,
					     param->flags);

	trace_kgsl_issueibcmds(dev_priv->device, param, result);

	if (result != 0)
		goto free_ibdesc;

	/* this is a check to try to detect if a command buffer was freed
	 * during issueibcmds().
	 */
	if (!check_ibdesc(dev_priv, ibdesc, param->numibs, false)) {
		KGSL_DRV_ERR(dev_priv->device, "bad ibdesc AFTER issue");
		result = -EINVAL;
		goto free_ibdesc;
	}

free_ibdesc:
	kfree(ibdesc);
done:

#ifdef CONFIG_MSM_KGSL_DRM
	kgsl_gpu_mem_flush(DRM_KGSL_GEM_CACHE_OP_FROM_DEV);
#endif

	return result;
}
1004
1005static long kgsl_ioctl_cmdstream_readtimestamp(struct kgsl_device_private
1006 *dev_priv, unsigned int cmd,
1007 void *data)
1008{
1009 struct kgsl_cmdstream_readtimestamp *param = data;
1010
1011 param->timestamp =
1012 dev_priv->device->ftbl->readtimestamp(dev_priv->device,
1013 param->type);
1014
Norman Geed7402ff2011-10-28 08:51:11 -06001015 trace_kgsl_readtimestamp(dev_priv->device, param);
1016
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001017 return 0;
1018}
1019
Jeremy Gebbenc81a3c62012-02-07 16:10:23 -07001020static void kgsl_freemem_event_cb(struct kgsl_device *device,
1021 void *priv, u32 timestamp)
1022{
1023 struct kgsl_mem_entry *entry = priv;
1024 spin_lock(&entry->priv->mem_lock);
1025 list_del(&entry->list);
1026 spin_unlock(&entry->priv->mem_lock);
Jeremy Gebbena5859272012-03-01 12:46:28 -07001027 trace_kgsl_mem_timestamp_free(entry, timestamp);
Jeremy Gebbenc81a3c62012-02-07 16:10:23 -07001028 kgsl_mem_entry_put(entry);
1029}
1030
/*
 * Schedule a GPU buffer to be freed once the given timestamp retires.
 * Looks up the entry for param->gpuaddr and registers a timestamp event
 * (kgsl_freemem_event_cb) that will unlink and release it.
 *
 * Returns 0 on success, -EINVAL if no entry matches the gpuaddr, or the
 * error from kgsl_add_event().
 *
 * NOTE(review): the entry is looked up and the lock dropped before the
 * event is registered; presumably a concurrent free of the same gpuaddr
 * is prevented elsewhere - verify against the sharedmem_free path.
 */
static long kgsl_ioctl_cmdstream_freememontimestamp(struct kgsl_device_private
						    *dev_priv, unsigned int cmd,
						    void *data)
{
	int result = 0;
	struct kgsl_cmdstream_freememontimestamp *param = data;
	struct kgsl_mem_entry *entry = NULL;
	struct kgsl_device *device = dev_priv->device;
	unsigned int cur;

	/* Find the entry for the address under the process memory lock */
	spin_lock(&dev_priv->process_priv->mem_lock);
	entry = kgsl_sharedmem_find(dev_priv->process_priv, param->gpuaddr);
	spin_unlock(&dev_priv->process_priv->mem_lock);

	if (entry) {
		/* Current retired timestamp, used only for the trace point */
		cur = device->ftbl->readtimestamp(device,
			KGSL_TIMESTAMP_RETIRED);

		trace_kgsl_mem_timestamp_queue(entry, cur);
		/* The callback (kgsl_freemem_event_cb) does the actual free */
		result = kgsl_add_event(dev_priv->device, param->timestamp,
					kgsl_freemem_event_cb, entry, dev_priv);
	} else {
		KGSL_DRV_ERR(dev_priv->device,
			"invalid gpuaddr %08x\n", param->gpuaddr);
		result = -EINVAL;
	}

	return result;
}
1060
1061static long kgsl_ioctl_drawctxt_create(struct kgsl_device_private *dev_priv,
1062 unsigned int cmd, void *data)
1063{
1064 int result = 0;
1065 struct kgsl_drawctxt_create *param = data;
1066 struct kgsl_context *context = NULL;
1067
1068 context = kgsl_create_context(dev_priv);
1069
1070 if (context == NULL) {
1071 result = -ENOMEM;
1072 goto done;
1073 }
1074
1075 if (dev_priv->device->ftbl->drawctxt_create)
1076 result = dev_priv->device->ftbl->drawctxt_create(
1077 dev_priv->device, dev_priv->process_priv->pagetable,
1078 context, param->flags);
1079
1080 param->drawctxt_id = context->id;
1081
1082done:
1083 if (result && context)
1084 kgsl_destroy_context(dev_priv, context);
1085
1086 return result;
1087}
1088
1089static long kgsl_ioctl_drawctxt_destroy(struct kgsl_device_private *dev_priv,
1090 unsigned int cmd, void *data)
1091{
1092 int result = 0;
1093 struct kgsl_drawctxt_destroy *param = data;
1094 struct kgsl_context *context;
1095
1096 context = kgsl_find_context(dev_priv, param->drawctxt_id);
1097
1098 if (context == NULL) {
1099 result = -EINVAL;
1100 goto done;
1101 }
1102
1103 if (dev_priv->device->ftbl->drawctxt_destroy)
1104 dev_priv->device->ftbl->drawctxt_destroy(dev_priv->device,
1105 context);
1106
1107 kgsl_destroy_context(dev_priv, context);
1108
1109done:
1110 return result;
1111}
1112
1113static long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
1114 unsigned int cmd, void *data)
1115{
1116 int result = 0;
1117 struct kgsl_sharedmem_free *param = data;
1118 struct kgsl_process_private *private = dev_priv->process_priv;
1119 struct kgsl_mem_entry *entry = NULL;
1120
1121 spin_lock(&private->mem_lock);
1122 entry = kgsl_sharedmem_find(private, param->gpuaddr);
1123 if (entry)
1124 list_del(&entry->list);
1125 spin_unlock(&private->mem_lock);
1126
1127 if (entry) {
Jeremy Gebbena5859272012-03-01 12:46:28 -07001128 trace_kgsl_mem_free(entry);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001129 kgsl_mem_entry_put(entry);
1130 } else {
1131 KGSL_CORE_ERR("invalid gpuaddr %08x\n", param->gpuaddr);
1132 result = -EINVAL;
1133 }
1134
1135 return result;
1136}
1137
1138static struct vm_area_struct *kgsl_get_vma_from_start_addr(unsigned int addr)
1139{
1140 struct vm_area_struct *vma;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001141
1142 down_read(&current->mm->mmap_sem);
1143 vma = find_vma(current->mm, addr);
1144 up_read(&current->mm->mmap_sem);
Jordan Crouse2c542b62011-07-26 08:30:20 -06001145 if (!vma)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001146 KGSL_CORE_ERR("find_vma(%x) failed\n", addr);
Jordan Crouse2c542b62011-07-26 08:30:20 -06001147
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001148 return vma;
1149}
1150
/*
 * Allocate vmalloc-backed memory for the GPU and map it into a
 * pre-existing user VMA at param->hostptr.
 *
 * Quirk of this legacy interface: on input, param->gpuaddr optionally
 * carries the requested *length*; on success it is overwritten with the
 * GPU address of the allocation.  If no length is given, the whole VMA
 * must belong to this allocation and its size is used.
 *
 * Returns 0 on success or a negative error code.
 *
 * NOTE(review): "param->hostptr + len > vma->vm_end" assumes the sum
 * does not wrap; hostptr is validated non-zero but len comes straight
 * from userspace - verify overflow cannot occur on 32-bit.
 */
static long
kgsl_ioctl_sharedmem_from_vmalloc(struct kgsl_device_private *dev_priv,
				  unsigned int cmd, void *data)
{
	int result = 0, len = 0;
	struct kgsl_process_private *private = dev_priv->process_priv;
	struct kgsl_sharedmem_from_vmalloc *param = data;
	struct kgsl_mem_entry *entry = NULL;
	struct vm_area_struct *vma;

	/* This path maps through the GPU MMU; useless without one */
	if (!kgsl_mmu_enabled())
		return -ENODEV;

	if (!param->hostptr) {
		KGSL_CORE_ERR("invalid hostptr %x\n", param->hostptr);
		result = -EINVAL;
		goto error;
	}

	vma = kgsl_get_vma_from_start_addr(param->hostptr);
	if (!vma) {
		result = -EINVAL;
		goto error;
	}

	/*
	 * If the user specified a length, use it, otherwise try to
	 * infer the length if the vma region
	 */
	if (param->gpuaddr != 0) {
		/* gpuaddr doubles as the requested length on input */
		len = param->gpuaddr;
	} else {
		/*
		 * For this to work, we have to assume the VMA region is only
		 * for this single allocation. If it isn't, then bail out
		 */
		if (vma->vm_pgoff || (param->hostptr != vma->vm_start)) {
			KGSL_CORE_ERR("VMA region does not match hostaddr\n");
			result = -EINVAL;
			goto error;
		}

		len = vma->vm_end - vma->vm_start;
	}

	/* Make sure it fits */
	if (len == 0 || param->hostptr + len > vma->vm_end) {
		KGSL_CORE_ERR("Invalid memory allocation length %d\n", len);
		result = -EINVAL;
		goto error;
	}

	entry = kgsl_mem_entry_create();
	if (entry == NULL) {
		result = -ENOMEM;
		goto error;
	}

	/* Allocate the backing vmalloc memory and GPU mapping */
	result = kgsl_sharedmem_vmalloc_user(&entry->memdesc,
					     private->pagetable, len,
					     param->flags);
	if (result != 0)
		goto error_free_entry;

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	/* Insert the vmalloc pages into the user's VMA */
	result = remap_vmalloc_range(vma, (void *) entry->memdesc.hostptr, 0);
	if (result) {
		KGSL_CORE_ERR("remap_vmalloc_range failed: %d\n", result);
		goto error_free_vmalloc;
	}

	/* On output, gpuaddr carries the GPU-side address */
	param->gpuaddr = entry->memdesc.gpuaddr;

	entry->memtype = KGSL_MEM_ENTRY_KERNEL;

	kgsl_mem_entry_attach_process(entry, private);

	trace_kgsl_mem_alloc(entry);
	/* Process specific statistics */
	kgsl_process_add_stats(private, entry->memtype, len);

	kgsl_check_idle(dev_priv->device);
	return 0;

error_free_vmalloc:
	kgsl_sharedmem_free(&entry->memdesc);

error_free_entry:
	kfree(entry);

error:
	kgsl_check_idle(dev_priv->device);
	return result;
}
1246
/*
 * Return nonzero if the byte range [start, start + size) extends past
 * a region of len bytes.  The sum is computed in 64 bits so it cannot
 * wrap on 32-bit platforms.
 */
static inline int _check_region(unsigned long start, unsigned long size,
				uint64_t len)
{
	return (((uint64_t) start) + ((uint64_t) size)) > len ? 1 : 0;
}
1253
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001254static int kgsl_get_phys_file(int fd, unsigned long *start, unsigned long *len,
1255 unsigned long *vstart, struct file **filep)
1256{
1257 struct file *fbfile;
1258 int ret = 0;
1259 dev_t rdev;
1260 struct fb_info *info;
1261
1262 *filep = NULL;
Jordan Crousefd978432011-09-02 14:34:32 -06001263#ifdef CONFIG_ANDROID_PMEM
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001264 if (!get_pmem_file(fd, start, vstart, len, filep))
1265 return 0;
Jordan Crousefd978432011-09-02 14:34:32 -06001266#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001267
1268 fbfile = fget(fd);
1269 if (fbfile == NULL) {
1270 KGSL_CORE_ERR("fget_light failed\n");
1271 return -1;
1272 }
1273
1274 rdev = fbfile->f_dentry->d_inode->i_rdev;
1275 info = MAJOR(rdev) == FB_MAJOR ? registered_fb[MINOR(rdev)] : NULL;
1276 if (info) {
1277 *start = info->fix.smem_start;
1278 *len = info->fix.smem_len;
1279 *vstart = (unsigned long)__va(info->fix.smem_start);
1280 ret = 0;
1281 } else {
1282 KGSL_CORE_ERR("framebuffer minor %d not found\n",
1283 MINOR(rdev));
1284 ret = -1;
1285 }
1286
1287 fput(fbfile);
1288
1289 return ret;
1290}
1291
1292static int kgsl_setup_phys_file(struct kgsl_mem_entry *entry,
1293 struct kgsl_pagetable *pagetable,
1294 unsigned int fd, unsigned int offset,
1295 size_t size)
1296{
1297 int ret;
1298 unsigned long phys, virt, len;
1299 struct file *filep;
1300
1301 ret = kgsl_get_phys_file(fd, &phys, &len, &virt, &filep);
1302 if (ret)
1303 return ret;
1304
Wei Zou4061c0b2011-07-08 10:24:22 -07001305 if (phys == 0) {
1306 ret = -EINVAL;
1307 goto err;
1308 }
1309
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001310 if (offset >= len) {
1311 ret = -EINVAL;
1312 goto err;
1313 }
1314
1315 if (size == 0)
1316 size = len;
1317
1318 /* Adjust the size of the region to account for the offset */
1319 size += offset & ~PAGE_MASK;
1320
1321 size = ALIGN(size, PAGE_SIZE);
1322
1323 if (_check_region(offset & PAGE_MASK, size, len)) {
1324 KGSL_CORE_ERR("Offset (%ld) + size (%d) is larger"
1325 "than pmem region length %ld\n",
1326 offset & PAGE_MASK, size, len);
1327 ret = -EINVAL;
1328 goto err;
1329
1330 }
1331
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001332 entry->priv_data = filep;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001333
1334 entry->memdesc.pagetable = pagetable;
1335 entry->memdesc.size = size;
1336 entry->memdesc.physaddr = phys + (offset & PAGE_MASK);
1337 entry->memdesc.hostptr = (void *) (virt + (offset & PAGE_MASK));
Jordan Croused17e9aa2011-10-12 16:57:48 -06001338
1339 ret = memdesc_sg_phys(&entry->memdesc,
1340 phys + (offset & PAGE_MASK), size);
1341 if (ret)
1342 goto err;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001343
1344 return 0;
1345err:
Jordan Crousefd978432011-09-02 14:34:32 -06001346#ifdef CONFIG_ANDROID_PMEM
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001347 put_pmem_file(filep);
Jordan Crousefd978432011-09-02 14:34:32 -06001348#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001349 return ret;
1350}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001351
Jordan Croused17e9aa2011-10-12 16:57:48 -06001352static int memdesc_sg_virt(struct kgsl_memdesc *memdesc,
1353 void *addr, int size)
1354{
1355 int i;
1356 int sglen = PAGE_ALIGN(size) / PAGE_SIZE;
1357 unsigned long paddr = (unsigned long) addr;
1358
Jeff Boody28afec42012-01-18 15:47:46 -07001359 memdesc->sg = vmalloc(sglen * sizeof(struct scatterlist));
Jordan Croused17e9aa2011-10-12 16:57:48 -06001360 if (memdesc->sg == NULL)
1361 return -ENOMEM;
1362
1363 memdesc->sglen = sglen;
1364 sg_init_table(memdesc->sg, sglen);
1365
1366 spin_lock(&current->mm->page_table_lock);
1367
1368 for (i = 0; i < sglen; i++, paddr += PAGE_SIZE) {
1369 struct page *page;
1370 pmd_t *ppmd;
1371 pte_t *ppte;
1372 pgd_t *ppgd = pgd_offset(current->mm, paddr);
1373
1374 if (pgd_none(*ppgd) || pgd_bad(*ppgd))
1375 goto err;
1376
1377 ppmd = pmd_offset(ppgd, paddr);
1378 if (pmd_none(*ppmd) || pmd_bad(*ppmd))
1379 goto err;
1380
1381 ppte = pte_offset_map(ppmd, paddr);
1382 if (ppte == NULL)
1383 goto err;
1384
1385 page = pfn_to_page(pte_pfn(*ppte));
1386 if (!page)
1387 goto err;
1388
1389 sg_set_page(&memdesc->sg[i], page, PAGE_SIZE, 0);
1390 pte_unmap(ppte);
1391 }
1392
1393 spin_unlock(&current->mm->page_table_lock);
1394
1395 return 0;
1396
1397err:
1398 spin_unlock(&current->mm->page_table_lock);
Jeff Boody28afec42012-01-18 15:47:46 -07001399 vfree(memdesc->sg);
Jordan Croused17e9aa2011-10-12 16:57:48 -06001400 memdesc->sg = NULL;
1401
1402 return -EINVAL;
1403}
1404
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001405static int kgsl_setup_hostptr(struct kgsl_mem_entry *entry,
1406 struct kgsl_pagetable *pagetable,
1407 void *hostptr, unsigned int offset,
1408 size_t size)
1409{
1410 struct vm_area_struct *vma;
1411 unsigned int len;
1412
1413 down_read(&current->mm->mmap_sem);
1414 vma = find_vma(current->mm, (unsigned int) hostptr);
1415 up_read(&current->mm->mmap_sem);
1416
1417 if (!vma) {
1418 KGSL_CORE_ERR("find_vma(%p) failed\n", hostptr);
1419 return -EINVAL;
1420 }
1421
1422 /* We don't necessarily start at vma->vm_start */
1423 len = vma->vm_end - (unsigned long) hostptr;
1424
1425 if (offset >= len)
1426 return -EINVAL;
1427
1428 if (!KGSL_IS_PAGE_ALIGNED((unsigned long) hostptr) ||
1429 !KGSL_IS_PAGE_ALIGNED(len)) {
1430 KGSL_CORE_ERR("user address len(%u)"
1431 "and start(%p) must be page"
1432 "aligned\n", len, hostptr);
1433 return -EINVAL;
1434 }
1435
1436 if (size == 0)
1437 size = len;
1438
1439 /* Adjust the size of the region to account for the offset */
1440 size += offset & ~PAGE_MASK;
1441
1442 size = ALIGN(size, PAGE_SIZE);
1443
1444 if (_check_region(offset & PAGE_MASK, size, len)) {
1445 KGSL_CORE_ERR("Offset (%ld) + size (%d) is larger"
1446 "than region length %d\n",
1447 offset & PAGE_MASK, size, len);
1448 return -EINVAL;
1449 }
1450
1451 entry->memdesc.pagetable = pagetable;
1452 entry->memdesc.size = size;
1453 entry->memdesc.hostptr = hostptr + (offset & PAGE_MASK);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001454
Jordan Croused17e9aa2011-10-12 16:57:48 -06001455 return memdesc_sg_virt(&entry->memdesc,
1456 hostptr + (offset & PAGE_MASK), size);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001457}
1458
1459#ifdef CONFIG_ASHMEM
1460static int kgsl_setup_ashmem(struct kgsl_mem_entry *entry,
1461 struct kgsl_pagetable *pagetable,
1462 int fd, void *hostptr, size_t size)
1463{
1464 int ret;
1465 struct vm_area_struct *vma;
1466 struct file *filep, *vmfile;
1467 unsigned long len;
Jordan Crouse2c542b62011-07-26 08:30:20 -06001468 unsigned int hostaddr = (unsigned int) hostptr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001469
Jordan Crouse2c542b62011-07-26 08:30:20 -06001470 vma = kgsl_get_vma_from_start_addr(hostaddr);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001471 if (vma == NULL)
1472 return -EINVAL;
1473
Jordan Crouse2c542b62011-07-26 08:30:20 -06001474 if (vma->vm_pgoff || vma->vm_start != hostaddr) {
1475 KGSL_CORE_ERR("Invalid vma region\n");
1476 return -EINVAL;
1477 }
1478
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001479 len = vma->vm_end - vma->vm_start;
1480
1481 if (size == 0)
1482 size = len;
1483
1484 if (size != len) {
1485 KGSL_CORE_ERR("Invalid size %d for vma region %p\n",
1486 size, hostptr);
1487 return -EINVAL;
1488 }
1489
1490 ret = get_ashmem_file(fd, &filep, &vmfile, &len);
1491
1492 if (ret) {
1493 KGSL_CORE_ERR("get_ashmem_file failed\n");
1494 return ret;
1495 }
1496
1497 if (vmfile != vma->vm_file) {
1498 KGSL_CORE_ERR("ashmem shmem file does not match vma\n");
1499 ret = -EINVAL;
1500 goto err;
1501 }
1502
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001503 entry->priv_data = filep;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001504 entry->memdesc.pagetable = pagetable;
1505 entry->memdesc.size = ALIGN(size, PAGE_SIZE);
1506 entry->memdesc.hostptr = hostptr;
Jordan Croused17e9aa2011-10-12 16:57:48 -06001507
1508 ret = memdesc_sg_virt(&entry->memdesc, hostptr, size);
1509 if (ret)
1510 goto err;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001511
1512 return 0;
1513
1514err:
1515 put_ashmem_file(filep);
1516 return ret;
1517}
1518#else
/* Stub for kernels built without CONFIG_ASHMEM: ashmem mapping is
 * always rejected. */
static int kgsl_setup_ashmem(struct kgsl_mem_entry *entry,
			     struct kgsl_pagetable *pagetable,
			     int fd, void *hostptr, size_t size)
{
	return -EINVAL;
}
1525#endif
1526
Jordan Crouse8eab35a2011-10-12 16:57:48 -06001527static int kgsl_setup_ion(struct kgsl_mem_entry *entry,
1528 struct kgsl_pagetable *pagetable, int fd)
1529{
1530 struct ion_handle *handle;
1531 struct scatterlist *s;
1532 unsigned long flags;
1533
1534 if (kgsl_ion_client == NULL) {
1535 kgsl_ion_client = msm_ion_client_create(UINT_MAX, KGSL_NAME);
1536 if (kgsl_ion_client == NULL)
1537 return -ENODEV;
1538 }
1539
1540 handle = ion_import_fd(kgsl_ion_client, fd);
1541 if (IS_ERR_OR_NULL(handle))
1542 return PTR_ERR(handle);
1543
1544 entry->memtype = KGSL_MEM_ENTRY_ION;
1545 entry->priv_data = handle;
1546 entry->memdesc.pagetable = pagetable;
1547 entry->memdesc.size = 0;
1548
1549 if (ion_handle_get_flags(kgsl_ion_client, handle, &flags))
1550 goto err;
1551
1552 entry->memdesc.sg = ion_map_dma(kgsl_ion_client, handle, flags);
1553
1554 if (IS_ERR_OR_NULL(entry->memdesc.sg))
1555 goto err;
1556
1557 /* Calculate the size of the memdesc from the sglist */
1558
1559 entry->memdesc.sglen = 0;
1560
1561 for (s = entry->memdesc.sg; s != NULL; s = sg_next(s)) {
1562 entry->memdesc.size += s->length;
1563 entry->memdesc.sglen++;
1564 }
1565
1566 return 0;
1567err:
1568 ion_free(kgsl_ion_client, handle);
1569 return -ENOMEM;
1570}
1571
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001572static long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
1573 unsigned int cmd, void *data)
1574{
1575 int result = -EINVAL;
1576 struct kgsl_map_user_mem *param = data;
1577 struct kgsl_mem_entry *entry = NULL;
1578 struct kgsl_process_private *private = dev_priv->process_priv;
Jason848741a2011-07-12 10:24:25 -07001579 enum kgsl_user_mem_type memtype;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001580
1581 entry = kgsl_mem_entry_create();
1582
1583 if (entry == NULL)
1584 return -ENOMEM;
1585
Jason848741a2011-07-12 10:24:25 -07001586 if (_IOC_SIZE(cmd) == sizeof(struct kgsl_sharedmem_from_pmem))
1587 memtype = KGSL_USER_MEM_TYPE_PMEM;
1588 else
1589 memtype = param->memtype;
1590
1591 switch (memtype) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001592 case KGSL_USER_MEM_TYPE_PMEM:
1593 if (param->fd == 0 || param->len == 0)
1594 break;
1595
1596 result = kgsl_setup_phys_file(entry, private->pagetable,
1597 param->fd, param->offset,
1598 param->len);
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001599 entry->memtype = KGSL_MEM_ENTRY_PMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001600 break;
1601
1602 case KGSL_USER_MEM_TYPE_ADDR:
1603 if (!kgsl_mmu_enabled()) {
1604 KGSL_DRV_ERR(dev_priv->device,
1605 "Cannot map paged memory with the "
1606 "MMU disabled\n");
1607 break;
1608 }
1609
1610 if (param->hostptr == 0)
1611 break;
1612
1613 result = kgsl_setup_hostptr(entry, private->pagetable,
1614 (void *) param->hostptr,
1615 param->offset, param->len);
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001616 entry->memtype = KGSL_MEM_ENTRY_USER;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001617 break;
1618
1619 case KGSL_USER_MEM_TYPE_ASHMEM:
1620 if (!kgsl_mmu_enabled()) {
1621 KGSL_DRV_ERR(dev_priv->device,
1622 "Cannot map paged memory with the "
1623 "MMU disabled\n");
1624 break;
1625 }
1626
1627 if (param->hostptr == 0)
1628 break;
1629
1630 result = kgsl_setup_ashmem(entry, private->pagetable,
1631 param->fd, (void *) param->hostptr,
1632 param->len);
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001633
1634 entry->memtype = KGSL_MEM_ENTRY_ASHMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001635 break;
Jordan Crouse8eab35a2011-10-12 16:57:48 -06001636 case KGSL_USER_MEM_TYPE_ION:
1637 result = kgsl_setup_ion(entry, private->pagetable,
1638 param->fd);
1639 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001640 default:
Jason848741a2011-07-12 10:24:25 -07001641 KGSL_CORE_ERR("Invalid memory type: %x\n", memtype);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001642 break;
1643 }
1644
1645 if (result)
1646 goto error;
1647
1648 result = kgsl_mmu_map(private->pagetable,
1649 &entry->memdesc,
1650 GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
1651
1652 if (result)
1653 goto error_put_file_ptr;
1654
1655 /* Adjust the returned value for a non 4k aligned offset */
1656 param->gpuaddr = entry->memdesc.gpuaddr + (param->offset & ~PAGE_MASK);
1657
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001658 KGSL_STATS_ADD(param->len, kgsl_driver.stats.mapped,
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001659 kgsl_driver.stats.mapped_max);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001660
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001661 kgsl_process_add_stats(private, entry->memtype, param->len);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001662
1663 kgsl_mem_entry_attach_process(entry, private);
Jeremy Gebbena5859272012-03-01 12:46:28 -07001664 trace_kgsl_mem_map(entry, param->fd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001665
1666 kgsl_check_idle(dev_priv->device);
1667 return result;
1668
1669 error_put_file_ptr:
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001670 if (entry->priv_data)
1671 fput(entry->priv_data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001672
1673error:
1674 kfree(entry);
1675 kgsl_check_idle(dev_priv->device);
1676 return result;
1677}
1678
1679/*This function flushes a graphics memory allocation from CPU cache
1680 *when caching is enabled with MMU*/
1681static long
1682kgsl_ioctl_sharedmem_flush_cache(struct kgsl_device_private *dev_priv,
1683 unsigned int cmd, void *data)
1684{
1685 int result = 0;
1686 struct kgsl_mem_entry *entry;
1687 struct kgsl_sharedmem_free *param = data;
1688 struct kgsl_process_private *private = dev_priv->process_priv;
1689
1690 spin_lock(&private->mem_lock);
1691 entry = kgsl_sharedmem_find(private, param->gpuaddr);
1692 if (!entry) {
1693 KGSL_CORE_ERR("invalid gpuaddr %08x\n", param->gpuaddr);
1694 result = -EINVAL;
Jeremy Gebben690f9d12011-08-08 16:33:49 -06001695 goto done;
1696 }
Jeremy Gebben690f9d12011-08-08 16:33:49 -06001697 if (!entry->memdesc.hostptr) {
1698 KGSL_CORE_ERR("invalid hostptr with gpuaddr %08x\n",
1699 param->gpuaddr);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001700 goto done;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001701 }
Jeremy Gebben690f9d12011-08-08 16:33:49 -06001702
1703 kgsl_cache_range_op(&entry->memdesc, KGSL_CACHE_OP_CLEAN);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001704done:
Jeremy Gebben690f9d12011-08-08 16:33:49 -06001705 spin_unlock(&private->mem_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001706 return result;
1707}
1708
1709static long
1710kgsl_ioctl_gpumem_alloc(struct kgsl_device_private *dev_priv,
1711 unsigned int cmd, void *data)
1712{
1713 struct kgsl_process_private *private = dev_priv->process_priv;
1714 struct kgsl_gpumem_alloc *param = data;
1715 struct kgsl_mem_entry *entry;
1716 int result;
1717
1718 entry = kgsl_mem_entry_create();
1719 if (entry == NULL)
1720 return -ENOMEM;
1721
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001722 result = kgsl_allocate_user(&entry->memdesc, private->pagetable,
1723 param->size, param->flags);
1724
1725 if (result == 0) {
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001726 entry->memtype = KGSL_MEM_ENTRY_KERNEL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001727 kgsl_mem_entry_attach_process(entry, private);
1728 param->gpuaddr = entry->memdesc.gpuaddr;
1729
Jordan Crouse1b897cf2011-10-12 16:57:48 -06001730 kgsl_process_add_stats(private, entry->memtype, param->size);
Jeremy Gebbena5859272012-03-01 12:46:28 -07001731 trace_kgsl_mem_alloc(entry);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001732 } else
1733 kfree(entry);
1734
1735 kgsl_check_idle(dev_priv->device);
1736 return result;
1737}
Jeremy Gebbena7423e42011-04-18 15:11:21 -06001738static long kgsl_ioctl_cff_syncmem(struct kgsl_device_private *dev_priv,
1739 unsigned int cmd, void *data)
1740{
1741 int result = 0;
1742 struct kgsl_cff_syncmem *param = data;
1743 struct kgsl_process_private *private = dev_priv->process_priv;
1744 struct kgsl_mem_entry *entry = NULL;
1745
1746 spin_lock(&private->mem_lock);
1747 entry = kgsl_sharedmem_find_region(private, param->gpuaddr, param->len);
1748 if (entry)
1749 kgsl_cffdump_syncmem(dev_priv, &entry->memdesc, param->gpuaddr,
1750 param->len, true);
1751 else
1752 result = -EINVAL;
1753 spin_unlock(&private->mem_lock);
1754 return result;
1755}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001756
Sushmita Susheelendra41f8fa32011-05-11 17:15:58 -06001757static long kgsl_ioctl_cff_user_event(struct kgsl_device_private *dev_priv,
1758 unsigned int cmd, void *data)
1759{
1760 int result = 0;
1761 struct kgsl_cff_user_event *param = data;
1762
1763 kgsl_cffdump_user_event(param->cff_opcode, param->op1, param->op2,
1764 param->op3, param->op4, param->op5);
1765
1766 return result;
1767}
1768
Jordan Croused4bc9d22011-11-17 13:39:21 -07001769#ifdef CONFIG_GENLOCK
/* Per-event state for a genlock timestamp event */
struct kgsl_genlock_event_priv {
	struct genlock_handle *handle;	/* handle unlocked/put by the callback */
	struct genlock *lock;	/* NOTE(review): never set or read in this file - verify it is needed */
};
1774
1775/**
1776 * kgsl_genlock_event_cb - Event callback for a genlock timestamp event
1777 * @device - The KGSL device that expired the timestamp
1778 * @priv - private data for the event
1779 * @timestamp - the timestamp that triggered the event
1780 *
1781 * Release a genlock lock following the expiration of a timestamp
1782 */
1783
1784static void kgsl_genlock_event_cb(struct kgsl_device *device,
1785 void *priv, u32 timestamp)
1786{
1787 struct kgsl_genlock_event_priv *ev = priv;
1788 int ret;
1789
1790 ret = genlock_lock(ev->handle, GENLOCK_UNLOCK, 0, 0);
1791 if (ret)
1792 KGSL_CORE_ERR("Error while unlocking genlock: %d\n", ret);
1793
1794 genlock_put_handle(ev->handle);
1795
1796 kfree(ev);
1797}
1798
1799/**
1800 * kgsl_add_genlock-event - Create a new genlock event
1801 * @device - KGSL device to create the event on
1802 * @timestamp - Timestamp to trigger the event
1803 * @data - User space buffer containing struct kgsl_genlock_event_priv
1804 * @len - length of the userspace buffer
Jeremy Gebbenfd87f9a2012-02-10 07:06:09 -07001805 * @owner - driver instance that owns this event
Jordan Croused4bc9d22011-11-17 13:39:21 -07001806 * @returns 0 on success or error code on error
1807 *
1808 * Attack to a genlock handle and register an event to release the
1809 * genlock lock when the timestamp expires
1810 */
1811
1812static int kgsl_add_genlock_event(struct kgsl_device *device,
Jeremy Gebbenfd87f9a2012-02-10 07:06:09 -07001813 u32 timestamp, void __user *data, int len,
1814 struct kgsl_device_private *owner)
Jordan Croused4bc9d22011-11-17 13:39:21 -07001815{
1816 struct kgsl_genlock_event_priv *event;
1817 struct kgsl_timestamp_event_genlock priv;
1818 int ret;
1819
1820 if (len != sizeof(priv))
1821 return -EINVAL;
1822
1823 if (copy_from_user(&priv, data, sizeof(priv)))
1824 return -EFAULT;
1825
1826 event = kzalloc(sizeof(*event), GFP_KERNEL);
1827
1828 if (event == NULL)
1829 return -ENOMEM;
1830
1831 event->handle = genlock_get_handle_fd(priv.handle);
1832
1833 if (IS_ERR(event->handle)) {
1834 int ret = PTR_ERR(event->handle);
1835 kfree(event);
1836 return ret;
1837 }
1838
Jeremy Gebbenfd87f9a2012-02-10 07:06:09 -07001839 ret = kgsl_add_event(device, timestamp, kgsl_genlock_event_cb, event,
1840 owner);
Jordan Croused4bc9d22011-11-17 13:39:21 -07001841 if (ret)
1842 kfree(event);
1843
1844 return ret;
1845}
1846#else
/* Stub for kernels built without CONFIG_GENLOCK: genlock timestamp
 * events are always rejected. */
static long kgsl_add_genlock_event(struct kgsl_device *device,
	u32 timestamp, void __user *data, int len,
	struct kgsl_device_private *owner)
{
	return -EINVAL;
}
1853#endif
1854
1855/**
1856 * kgsl_ioctl_timestamp_event - Register a new timestamp event from userspace
1857 * @dev_priv - pointer to the private device structure
1858 * @cmd - the ioctl cmd passed from kgsl_ioctl
1859 * @data - the user data buffer from kgsl_ioctl
1860 * @returns 0 on success or error code on failure
1861 */
1862
1863static long kgsl_ioctl_timestamp_event(struct kgsl_device_private *dev_priv,
1864 unsigned int cmd, void *data)
1865{
1866 struct kgsl_timestamp_event *param = data;
1867 int ret;
1868
1869 switch (param->type) {
1870 case KGSL_TIMESTAMP_EVENT_GENLOCK:
1871 ret = kgsl_add_genlock_event(dev_priv->device,
Jeremy Gebbenfd87f9a2012-02-10 07:06:09 -07001872 param->timestamp, param->priv, param->len,
1873 dev_priv);
Jordan Croused4bc9d22011-11-17 13:39:21 -07001874 break;
1875 default:
1876 ret = -EINVAL;
1877 }
1878
1879 return ret;
1880}
1881
/* Signature shared by all KGSL ioctl handlers */
typedef long (*kgsl_ioctl_func_t)(struct kgsl_device_private *,
	unsigned int, void *);

/* Build a dispatch-table slot indexed by the ioctl number */
#define KGSL_IOCTL_FUNC(_cmd, _func, _lock) \
	[_IOC_NR(_cmd)] = { .cmd = _cmd, .func = _func, .lock = _lock }

/*
 * Ioctl dispatch table, indexed by _IOC_NR(cmd).  "lock" selects whether
 * kgsl_ioctl() takes the device mutex (and suspend check) around the
 * handler; memory-management ioctls manage their own locking.
 */
static const struct {
	unsigned int cmd;
	kgsl_ioctl_func_t func;
	int lock;
} kgsl_ioctl_funcs[] = {
	KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_GETPROPERTY,
			kgsl_ioctl_device_getproperty, 1),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP,
			kgsl_ioctl_device_waittimestamp, 1),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS,
			kgsl_ioctl_rb_issueibcmds, 1),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP,
			kgsl_ioctl_cmdstream_readtimestamp, 1),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP,
			kgsl_ioctl_cmdstream_freememontimestamp, 1),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_CREATE,
			kgsl_ioctl_drawctxt_create, 1),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_DESTROY,
			kgsl_ioctl_drawctxt_destroy, 1),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_MAP_USER_MEM,
			kgsl_ioctl_map_user_mem, 0),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FROM_PMEM,
			kgsl_ioctl_map_user_mem, 0),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FREE,
			kgsl_ioctl_sharedmem_free, 0),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC,
			kgsl_ioctl_sharedmem_from_vmalloc, 0),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE,
			kgsl_ioctl_sharedmem_flush_cache, 0),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC,
			kgsl_ioctl_gpumem_alloc, 0),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_SYNCMEM,
			kgsl_ioctl_cff_syncmem, 0),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_USER_EVENT,
			kgsl_ioctl_cff_user_event, 0),
	KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMESTAMP_EVENT,
			kgsl_ioctl_timestamp_event, 1),
};
1926
/*
 * Top-level ioctl entry point.  Copies the argument struct into a
 * kernel buffer (a 64-byte stack buffer for small commands, kzalloc
 * otherwise), dispatches through kgsl_ioctl_funcs[] or the device's
 * own ioctl hook, and copies the result back for IOC_OUT commands.
 */
static long kgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kgsl_device_private *dev_priv = filep->private_data;
	unsigned int nr = _IOC_NR(cmd);
	kgsl_ioctl_func_t func;
	int lock, ret;
	char ustack[64];
	void *uptr = NULL;

	BUG_ON(dev_priv == NULL);

	/* Workaround for a previously incorrectly defined ioctl code.
	   This helps ensure binary compatibility */

	if (cmd == IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD)
		cmd = IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP;
	else if (cmd == IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD)
		cmd = IOCTL_KGSL_CMDSTREAM_READTIMESTAMP;

	if (cmd & (IOC_IN | IOC_OUT)) {
		/* Small payloads live on the stack, large ones on the heap;
		 * the kfree at done: mirrors this same size test */
		if (_IOC_SIZE(cmd) < sizeof(ustack))
			uptr = ustack;
		else {
			uptr = kzalloc(_IOC_SIZE(cmd), GFP_KERNEL);
			if (uptr == NULL) {
				KGSL_MEM_ERR(dev_priv->device,
					"kzalloc(%d) failed\n", _IOC_SIZE(cmd));
				ret = -ENOMEM;
				goto done;
			}
		}

		if (cmd & IOC_IN) {
			if (copy_from_user(uptr, (void __user *) arg,
				_IOC_SIZE(cmd))) {
				ret = -EFAULT;
				goto done;
			}
		} else
			/* Write-only command: hand the handler zeroed input */
			memset(uptr, 0, _IOC_SIZE(cmd));
	}

	/* Prefer the core dispatch table; fall back to the device hook */
	if (nr < ARRAY_SIZE(kgsl_ioctl_funcs) &&
	    kgsl_ioctl_funcs[nr].func != NULL) {
		func = kgsl_ioctl_funcs[nr].func;
		lock = kgsl_ioctl_funcs[nr].lock;
	} else {
		func = dev_priv->device->ftbl->ioctl;
		if (!func) {
			KGSL_DRV_INFO(dev_priv->device,
				"invalid ioctl code %08x\n", cmd);
			ret = -ENOIOCTLCMD;
			goto done;
		}
		lock = 1;
	}

	if (lock) {
		mutex_lock(&dev_priv->device->mutex);
		kgsl_check_suspended(dev_priv->device);
	}

	ret = func(dev_priv, cmd, uptr);

	if (lock) {
		kgsl_check_idle_locked(dev_priv->device);
		mutex_unlock(&dev_priv->device->mutex);
	}

	/* Only copy results out on success */
	if (ret == 0 && (cmd & IOC_OUT)) {
		if (copy_to_user((void __user *) arg, uptr, _IOC_SIZE(cmd)))
			ret = -EFAULT;
	}

done:
	/* kfree(NULL) is a no-op, so this is safe even if uptr was never
	 * allocated (commands with no IN/OUT payload) */
	if (_IOC_SIZE(cmd) >= sizeof(ustack))
		kfree(uptr);

	return ret;
}
2007
2008static int
2009kgsl_mmap_memstore(struct kgsl_device *device, struct vm_area_struct *vma)
2010{
2011 struct kgsl_memdesc *memdesc = &device->memstore;
2012 int result;
2013 unsigned int vma_size = vma->vm_end - vma->vm_start;
2014
2015 /* The memstore can only be mapped as read only */
2016
2017 if (vma->vm_flags & VM_WRITE)
2018 return -EPERM;
2019
2020 if (memdesc->size != vma_size) {
2021 KGSL_MEM_ERR(device, "memstore bad size: %d should be %d\n",
2022 vma_size, memdesc->size);
2023 return -EINVAL;
2024 }
2025
2026 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2027
2028 result = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
2029 vma_size, vma->vm_page_prot);
2030 if (result != 0)
2031 KGSL_MEM_ERR(device, "remap_pfn_range failed: %d\n",
2032 result);
2033
2034 return result;
2035}
2036
/*
 * kgsl_gpumem_vm_open is called whenever a vma region is copied or split.
 * Increase the refcount to make sure that the accounting stays correct
 */

static void kgsl_gpumem_vm_open(struct vm_area_struct *vma)
{
	/* vm_private_data holds the kgsl_mem_entry set up at mmap time */
	struct kgsl_mem_entry *entry = vma->vm_private_data;
	kgsl_mem_entry_get(entry);
}
2047
/*
 * Page-fault handler for GPU memory mappings: delegate to the
 * memdesc's backend-specific vmfault op, or raise SIGBUS when the
 * backing store provides none.
 */
static int
kgsl_gpumem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kgsl_mem_entry *entry = vma->vm_private_data;

	if (!entry->memdesc.ops || !entry->memdesc.ops->vmfault)
		return VM_FAULT_SIGBUS;

	return entry->memdesc.ops->vmfault(&entry->memdesc, vma, vmf);
}
2058
/*
 * Drop the reference taken in kgsl_mmap()/kgsl_gpumem_vm_open() when
 * a vma is torn down; the entry is freed when the last reference goes.
 */
static void
kgsl_gpumem_vm_close(struct vm_area_struct *vma)
{
	struct kgsl_mem_entry *entry = vma->vm_private_data;
	kgsl_mem_entry_put(entry);
}
2065
/* vm operations for GPU memory mappings created by kgsl_mmap() */
static struct vm_operations_struct kgsl_gpumem_vm_ops = {
	.open = kgsl_gpumem_vm_open,
	.fault = kgsl_gpumem_vm_fault,
	.close = kgsl_gpumem_vm_close,
};
2071
2072static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
2073{
2074 unsigned long vma_offset = vma->vm_pgoff << PAGE_SHIFT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002075 struct kgsl_device_private *dev_priv = file->private_data;
2076 struct kgsl_process_private *private = dev_priv->process_priv;
Jordan Crouse976cf0e2011-09-12 10:41:49 -06002077 struct kgsl_mem_entry *tmp, *entry = NULL;
Jordan Crouse2db0af92011-08-08 16:05:09 -06002078 struct kgsl_device *device = dev_priv->device;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002079
2080 /* Handle leagacy behavior for memstore */
2081
2082 if (vma_offset == device->memstore.physaddr)
2083 return kgsl_mmap_memstore(device, vma);
2084
2085 /* Find a chunk of GPU memory */
2086
2087 spin_lock(&private->mem_lock);
Jordan Crouse976cf0e2011-09-12 10:41:49 -06002088 list_for_each_entry(tmp, &private->mem_list, list) {
2089 if (vma_offset == tmp->memdesc.gpuaddr) {
2090 kgsl_mem_entry_get(tmp);
2091 entry = tmp;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002092 break;
2093 }
2094 }
2095 spin_unlock(&private->mem_lock);
2096
2097 if (entry == NULL)
2098 return -EINVAL;
2099
Jordan Croused17e9aa2011-10-12 16:57:48 -06002100 if (!entry->memdesc.ops ||
2101 !entry->memdesc.ops->vmflags ||
2102 !entry->memdesc.ops->vmfault)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002103 return -EINVAL;
2104
2105 vma->vm_flags |= entry->memdesc.ops->vmflags(&entry->memdesc);
2106
2107 vma->vm_private_data = entry;
2108 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
2109 vma->vm_ops = &kgsl_gpumem_vm_ops;
2110 vma->vm_file = file;
2111
2112 return 0;
2113}
2114
/* File operations for the KGSL character device nodes */
static const struct file_operations kgsl_fops = {
	.owner = THIS_MODULE,
	.release = kgsl_release,
	.open = kgsl_open,
	.mmap = kgsl_mmap,
	.unlocked_ioctl = kgsl_ioctl,
};
2122
/*
 * Global driver state.  The locks are initialized statically; the
 * remaining fields are populated during kgsl_core_init() and device
 * registration.
 */
struct kgsl_driver kgsl_driver  = {
	.process_mutex = __MUTEX_INITIALIZER(kgsl_driver.process_mutex),
	.ptlock = __SPIN_LOCK_UNLOCKED(kgsl_driver.ptlock),
	.devlock = __MUTEX_INITIALIZER(kgsl_driver.devlock),
};
EXPORT_SYMBOL(kgsl_driver);
2129
/*
 * kgsl_unregister_device() - tear down the generic state created by
 * kgsl_register_device() and release the device's minor number.
 *
 * Safe to call for a device that was never registered: if the device
 * is not found in the minor table the function returns early.
 */
void kgsl_unregister_device(struct kgsl_device *device)
{
	int minor;

	/* Find the minor slot this device was registered under */
	mutex_lock(&kgsl_driver.devlock);
	for (minor = 0; minor < KGSL_DEVICE_MAX; minor++) {
		if (device == kgsl_driver.devp[minor])
			break;
	}

	mutex_unlock(&kgsl_driver.devlock);

	if (minor == KGSL_DEVICE_MAX)
		return;

	kgsl_device_snapshot_close(device);

	kgsl_cffdump_close(device->id);
	kgsl_pwrctrl_uninit_sysfs(device);

	/* The idle wakelock is only created on 8x60 class hardware
	 * (see kgsl_register_device) */
	if (cpu_is_msm8x60())
		wake_lock_destroy(&device->idle_wakelock);

	idr_destroy(&device->context_idr);

	/* hostptr doubles as the "memstore was allocated" flag */
	if (device->memstore.hostptr)
		kgsl_sharedmem_free(&device->memstore);

	kgsl_mmu_close(device);

	if (device->work_queue) {
		destroy_workqueue(device->work_queue);
		device->work_queue = NULL;
	}

	device_destroy(kgsl_driver.class,
		       MKDEV(MAJOR(kgsl_driver.major), minor));

	/* Release the minor slot last so it cannot be reused while the
	 * device node still exists */
	mutex_lock(&kgsl_driver.devlock);
	kgsl_driver.devp[minor] = NULL;
	mutex_unlock(&kgsl_driver.devlock);
}
EXPORT_SYMBOL(kgsl_unregister_device);
2173
/*
 * kgsl_register_device() - assign a minor, create the device node, and
 * initialize the generic (non-hardware) state of a kgsl device: wait
 * queues, completions, timers, workqueue, MMU, memstore, context idr,
 * snapshot engine, plus sysfs/debugfs entries.
 *
 * Returns 0 on success or a negative errno; on failure the work done
 * so far is unwound and the minor slot is released.
 */
int
kgsl_register_device(struct kgsl_device *device)
{
	int minor, ret;
	dev_t dev;

	/* Find a minor for the device */

	mutex_lock(&kgsl_driver.devlock);
	for (minor = 0; minor < KGSL_DEVICE_MAX; minor++) {
		if (kgsl_driver.devp[minor] == NULL) {
			kgsl_driver.devp[minor] = device;
			break;
		}
	}

	mutex_unlock(&kgsl_driver.devlock);

	if (minor == KGSL_DEVICE_MAX) {
		KGSL_CORE_ERR("minor devices exhausted\n");
		return -ENODEV;
	}

	/* Create the device */
	dev = MKDEV(MAJOR(kgsl_driver.major), minor);
	device->dev = device_create(kgsl_driver.class,
				    device->parentdev,
				    dev, device,
				    device->name);

	if (IS_ERR(device->dev)) {
		ret = PTR_ERR(device->dev);
		KGSL_CORE_ERR("device_create(%s): %d\n", device->name, ret);
		goto err_devlist;
	}

	dev_set_drvdata(device->parentdev, device);

	/* Generic device initialization */
	init_waitqueue_head(&device->wait_queue);

	kgsl_cffdump_open(device->id);

	init_completion(&device->hwaccess_gate);
	init_completion(&device->suspend_gate);

	ATOMIC_INIT_NOTIFIER_HEAD(&device->ts_notifier_list);

	setup_timer(&device->idle_timer, kgsl_timer, (unsigned long) device);
	ret = kgsl_create_device_workqueue(device);
	if (ret)
		goto err_devlist;

	INIT_WORK(&device->idle_check_ws, kgsl_idle_check);
	INIT_WORK(&device->ts_expired_ws, kgsl_timestamp_expired);

	INIT_LIST_HEAD(&device->events);

	ret = kgsl_mmu_init(device);
	if (ret != 0)
		goto err_dest_work_q;

	ret = kgsl_allocate_contiguous(&device->memstore,
		sizeof(struct kgsl_devmemstore));

	if (ret != 0)
		goto err_close_mmu;

	/* The idle wakelock only exists on 8x60 class hardware */
	if (cpu_is_msm8x60())
		wake_lock_init(&device->idle_wakelock,
			       WAKE_LOCK_IDLE, device->name);

	idr_init(&device->context_idr);

	/* Initalize the snapshot engine */
	kgsl_device_snapshot_init(device);

	/* sysfs and debugfs initalization - failure here is non fatal */

	/* Initialize logging */
	kgsl_device_debugfs_init(device);

	/* Initialize common sysfs entries */
	kgsl_pwrctrl_init_sysfs(device);

	return 0;

err_close_mmu:
	kgsl_mmu_close(device);
err_dest_work_q:
	destroy_workqueue(device->work_queue);
	device->work_queue = NULL;
err_devlist:
	/* NOTE(review): a successful device_create() is not undone on
	 * the err_dest_work_q/err_close_mmu paths (no device_destroy)
	 * -- verify whether this is intentional. */
	mutex_lock(&kgsl_driver.devlock);
	kgsl_driver.devp[minor] = NULL;
	mutex_unlock(&kgsl_driver.devlock);

	return ret;
}
EXPORT_SYMBOL(kgsl_register_device);
2274
2275int kgsl_device_platform_probe(struct kgsl_device *device,
2276 irqreturn_t (*dev_isr) (int, void*))
2277{
Michael Street8bacdd02012-01-05 14:55:01 -08002278 int result;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002279 int status = -EINVAL;
2280 struct kgsl_memregion *regspace = NULL;
2281 struct resource *res;
2282 struct platform_device *pdev =
2283 container_of(device->parentdev, struct platform_device, dev);
2284
2285 pm_runtime_enable(device->parentdev);
2286
2287 status = kgsl_pwrctrl_init(device);
2288 if (status)
2289 goto error;
2290
2291 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2292 device->iomemname);
2293 if (res == NULL) {
2294 KGSL_DRV_ERR(device, "platform_get_resource_byname failed\n");
2295 status = -EINVAL;
2296 goto error_pwrctrl_close;
2297 }
2298 if (res->start == 0 || resource_size(res) == 0) {
2299 KGSL_DRV_ERR(device, "dev %d invalid regspace\n", device->id);
2300 status = -EINVAL;
2301 goto error_pwrctrl_close;
2302 }
2303
2304 regspace = &device->regspace;
2305 regspace->mmio_phys_base = res->start;
2306 regspace->sizebytes = resource_size(res);
2307
2308 if (!request_mem_region(regspace->mmio_phys_base,
2309 regspace->sizebytes, device->name)) {
2310 KGSL_DRV_ERR(device, "request_mem_region failed\n");
2311 status = -ENODEV;
2312 goto error_pwrctrl_close;
2313 }
2314
2315 regspace->mmio_virt_base = ioremap(regspace->mmio_phys_base,
2316 regspace->sizebytes);
2317
2318 if (regspace->mmio_virt_base == NULL) {
2319 KGSL_DRV_ERR(device, "ioremap failed\n");
2320 status = -ENODEV;
2321 goto error_release_mem;
2322 }
2323
2324 status = request_irq(device->pwrctrl.interrupt_num, dev_isr,
2325 IRQF_TRIGGER_HIGH, device->name, device);
2326 if (status) {
2327 KGSL_DRV_ERR(device, "request_irq(%d) failed: %d\n",
2328 device->pwrctrl.interrupt_num, status);
2329 goto error_iounmap;
2330 }
2331 device->pwrctrl.have_irq = 1;
2332 disable_irq(device->pwrctrl.interrupt_num);
2333
2334 KGSL_DRV_INFO(device,
2335 "dev_id %d regs phys 0x%08x size 0x%08x virt %p\n",
2336 device->id, regspace->mmio_phys_base,
2337 regspace->sizebytes, regspace->mmio_virt_base);
2338
Michael Street8bacdd02012-01-05 14:55:01 -08002339 result = kgsl_drm_init(pdev);
2340 if (result)
2341 goto error_iounmap;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002342
2343 status = kgsl_register_device(device);
2344 if (!status)
2345 return status;
2346
2347 free_irq(device->pwrctrl.interrupt_num, NULL);
2348 device->pwrctrl.have_irq = 0;
2349error_iounmap:
2350 iounmap(regspace->mmio_virt_base);
2351 regspace->mmio_virt_base = NULL;
2352error_release_mem:
2353 release_mem_region(regspace->mmio_phys_base, regspace->sizebytes);
2354error_pwrctrl_close:
2355 kgsl_pwrctrl_close(device);
2356error:
2357 return status;
2358}
2359EXPORT_SYMBOL(kgsl_device_platform_probe);
2360
2361void kgsl_device_platform_remove(struct kgsl_device *device)
2362{
2363 struct kgsl_memregion *regspace = &device->regspace;
2364
2365 kgsl_unregister_device(device);
2366
2367 if (regspace->mmio_virt_base != NULL) {
2368 iounmap(regspace->mmio_virt_base);
2369 regspace->mmio_virt_base = NULL;
2370 release_mem_region(regspace->mmio_phys_base,
2371 regspace->sizebytes);
2372 }
2373 kgsl_pwrctrl_close(device);
2374
2375 pm_runtime_disable(device->parentdev);
2376}
2377EXPORT_SYMBOL(kgsl_device_platform_remove);
2378
2379static int __devinit
2380kgsl_ptdata_init(void)
2381{
Shubhraprakash Das767fdda2011-08-15 15:49:45 -06002382 kgsl_driver.ptpool = kgsl_mmu_ptpool_init(KGSL_PAGETABLE_SIZE,
2383 kgsl_pagetable_count);
2384 if (!kgsl_driver.ptpool)
2385 return -ENOMEM;
2386 return 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002387}
2388
/*
 * kgsl_core_exit() - undo kgsl_core_init().  Also used as the error
 * path of kgsl_core_init(), so it may run with only part of the core
 * state initialized.
 */
static void kgsl_core_exit(void)
{
	unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX);

	kgsl_mmu_ptpool_destroy(&kgsl_driver.ptpool);
	kgsl_driver.ptpool = NULL;

	/* NOTE(review): the cdev added in kgsl_core_init() is never
	 * cdev_del()'d here -- confirm whether that is a leak. */

	device_unregister(&kgsl_driver.virtdev);

	if (kgsl_driver.class) {
		class_destroy(kgsl_driver.class);
		kgsl_driver.class = NULL;
	}

	kgsl_drm_exit();
	kgsl_cffdump_destroy();
	kgsl_core_debugfs_close();
	kgsl_sharedmem_uninit_sysfs();
}
2408
/*
 * kgsl_core_init() - module init: allocate the char device region,
 * register the cdev, create the device class and a virtual sysfs
 * device for core statistics, bring up debugfs/sysfs/cffdump, and
 * (when a real GPU MMU is in use) preallocate the pagetable pool.
 * Per-device setup happens later in kgsl_register_device().
 */
static int __init kgsl_core_init(void)
{
	int result = 0;
	/* alloc major and minor device numbers */
	result = alloc_chrdev_region(&kgsl_driver.major, 0, KGSL_DEVICE_MAX,
				     KGSL_NAME);
	if (result < 0) {
		KGSL_CORE_ERR("alloc_chrdev_region failed err = %d\n", result);
		goto err;
	}

	cdev_init(&kgsl_driver.cdev, &kgsl_fops);
	kgsl_driver.cdev.owner = THIS_MODULE;
	kgsl_driver.cdev.ops = &kgsl_fops;
	result = cdev_add(&kgsl_driver.cdev, MKDEV(MAJOR(kgsl_driver.major), 0),
			  KGSL_DEVICE_MAX);

	if (result) {
		KGSL_CORE_ERR("kgsl: cdev_add() failed, dev_num= %d,"
			      " result= %d\n", kgsl_driver.major, result);
		goto err;
	}

	kgsl_driver.class = class_create(THIS_MODULE, KGSL_NAME);

	if (IS_ERR(kgsl_driver.class)) {
		result = PTR_ERR(kgsl_driver.class);
		KGSL_CORE_ERR("failed to create class %s", KGSL_NAME);
		goto err;
	}

	/* Make a virtual device for managing core related things
	   in sysfs */
	kgsl_driver.virtdev.class = kgsl_driver.class;
	dev_set_name(&kgsl_driver.virtdev, "kgsl");
	result = device_register(&kgsl_driver.virtdev);
	if (result) {
		KGSL_CORE_ERR("driver_register failed\n");
		goto err;
	}

	/* Make kobjects in the virtual device for storing statistics */

	kgsl_driver.ptkobj =
		kobject_create_and_add("pagetables",
				       &kgsl_driver.virtdev.kobj);

	kgsl_driver.prockobj =
		kobject_create_and_add("proc",
				       &kgsl_driver.virtdev.kobj);

	kgsl_core_debugfs_init();

	kgsl_sharedmem_init_sysfs();
	kgsl_cffdump_init();

	INIT_LIST_HEAD(&kgsl_driver.process_list);

	INIT_LIST_HEAD(&kgsl_driver.pagetable_list);

	kgsl_mmu_set_mmutype(ksgl_mmu_type);

	/* Only a real GPU MMU needs the preallocated pagetable pool */
	if (KGSL_MMU_TYPE_GPU == kgsl_mmu_get_mmutype()) {
		result = kgsl_ptdata_init();
		if (result)
			goto err;
	}

	return 0;

err:
	/* NOTE(review): kgsl_core_exit() unconditionally unregisters
	 * resources (virtdev, chrdev region) that may not have been set
	 * up yet when arriving here from an early failure -- verify. */
	kgsl_core_exit();
	return result;
}
2483
/* Module entry/exit points and metadata */
module_init(kgsl_core_init);
module_exit(kgsl_core_exit);

MODULE_AUTHOR("Qualcomm Innovation Center, Inc.");
MODULE_DESCRIPTION("MSM GPU driver");
MODULE_LICENSE("GPL");