 1/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13/* Implements an interface between KGSL and the DRM subsystem. For now this
14 * is pretty simple, but it will take on more of the workload as time goes
15 * on
16 */
17#include "drmP.h"
18#include "drm.h"
19#include <linux/android_pmem.h>
20#include <linux/notifier.h>
21
22#include "kgsl.h"
23#include "kgsl_device.h"
24#include "kgsl_drm.h"
25#include "kgsl_mmu.h"
26#include "kgsl_sharedmem.h"
27
28#define DRIVER_AUTHOR "Qualcomm"
29#define DRIVER_NAME "kgsl"
30#define DRIVER_DESC "KGSL DRM"
31#define DRIVER_DATE "20100127"
32
33#define DRIVER_MAJOR 2
34#define DRIVER_MINOR 1
35#define DRIVER_PATCHLEVEL 1
36
37#define DRM_KGSL_GEM_FLAG_MAPPED (1 << 0)
38
39#define ENTRY_EMPTY -1
40#define ENTRY_NEEDS_CLEANUP -2
41
42#define DRM_KGSL_NUM_FENCE_ENTRIES (DRM_KGSL_HANDLE_WAIT_ENTRIES << 2)
43#define DRM_KGSL_HANDLE_WAIT_ENTRIES 5
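
/* Note: macros expand lazily, so defining DRM_KGSL_NUM_FENCE_ENTRIES in
 * terms of DRM_KGSL_HANDLE_WAIT_ENTRIES before the latter appears is fine;
 * with the values above it works out to 5 << 2 = 20 fence slots.
 */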
44
45/* Returns true if the memory type is in PMEM */
46
47#ifdef CONFIG_KERNEL_PMEM_SMI_REGION
48#define TYPE_IS_PMEM(_t) \
49 (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_EBI) || \
50 ((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_SMI) || \
51 ((_t) & DRM_KGSL_GEM_TYPE_PMEM))
52#else
53#define TYPE_IS_PMEM(_t) \
54 (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_EBI) || \
55 ((_t) & (DRM_KGSL_GEM_TYPE_PMEM | DRM_KGSL_GEM_PMEM_EBI)))
56#endif
57
58/* Returns true if the memory type is regular */
59
60#define TYPE_IS_MEM(_t) \
61 (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_KMEM) || \
62 ((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE) || \
63 ((_t) & DRM_KGSL_GEM_TYPE_MEM))
64
65#define TYPE_IS_FD(_t) ((_t) & DRM_KGSL_GEM_TYPE_FD_MASK)
66
67/* Returns true if KMEM region is uncached */
68
69#define IS_MEM_UNCACHED(_t) \
70 ((_t == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE) || \
71 (_t == DRM_KGSL_GEM_TYPE_KMEM) || \
72 (TYPE_IS_MEM(_t) && (_t & DRM_KGSL_GEM_CACHE_WCOMBINE)))
73
74struct drm_kgsl_gem_object_wait_list_entry {
75 struct list_head list;
76 int pid;
77 int in_use;
78 wait_queue_head_t process_wait_q;
79};
80
81struct drm_kgsl_gem_object_fence {
82 int32_t fence_id;
83 unsigned int num_buffers;
84 int ts_valid;
85 unsigned int timestamp;
86 int ts_device;
87 int lockpid;
88 struct list_head buffers_in_fence;
89};
90
91struct drm_kgsl_gem_object_fence_list_entry {
92 struct list_head list;
93 int in_use;
94 struct drm_gem_object *gem_obj;
95};
96
97static int32_t fence_id = 0x1;
98
99static struct drm_kgsl_gem_object_fence
100 gem_buf_fence[DRM_KGSL_NUM_FENCE_ENTRIES];
101
102struct drm_kgsl_gem_object {
103 struct drm_gem_object *obj;
104 uint32_t type;
105 struct kgsl_memdesc memdesc;
106 struct kgsl_pagetable *pagetable;
107 uint64_t mmap_offset;
108 int bufcount;
109 int flags;
110 struct list_head list;
111 int active;
112
113 struct {
114 uint32_t offset;
115 uint32_t gpuaddr;
116 } bufs[DRM_KGSL_GEM_MAX_BUFFERS];
117
118 int bound;
119 int lockpid;
120 /* Put these here to avoid allocing all the time */
121 struct drm_kgsl_gem_object_wait_list_entry
122 wait_entries[DRM_KGSL_HANDLE_WAIT_ENTRIES];
123 /* Each object can only appear in a single fence */
124 struct drm_kgsl_gem_object_fence_list_entry
125 fence_entries[DRM_KGSL_NUM_FENCE_ENTRIES];
126
127 struct list_head wait_list;
128};
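
/* Object lifecycle, as implemented below: kgsl_gem_init_object() allocates
 * this private struct, kgsl_gem_alloc_memory() backs it with PMEM or vmalloc
 * memory on the first ALLOC/PREP ioctl, kgsl_gem_map() wires it into the GPU
 * pagetable when the object is bound, and kgsl_gem_free_object() tears it
 * all down when the last reference is dropped.
 */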
129
130/* This is a global list of all the memory currently mapped in the MMU */
131static struct list_head kgsl_mem_list;
132
133static void kgsl_gem_mem_flush(struct kgsl_memdesc *memdesc, int type, int op)
134{
135 int cacheop = 0;
136
137 switch (op) {
138 case DRM_KGSL_GEM_CACHE_OP_TO_DEV:
139 if (type & (DRM_KGSL_GEM_CACHE_WBACK |
140 DRM_KGSL_GEM_CACHE_WBACKWA))
141 cacheop = KGSL_CACHE_OP_CLEAN;
142
143 break;
144
145 case DRM_KGSL_GEM_CACHE_OP_FROM_DEV:
146 if (type & (DRM_KGSL_GEM_CACHE_WBACK |
147 DRM_KGSL_GEM_CACHE_WBACKWA |
148 DRM_KGSL_GEM_CACHE_WTHROUGH))
149 cacheop = KGSL_CACHE_OP_INV;
150 }
151
152 kgsl_cache_range_op(memdesc, cacheop);
153}
154
155/* Flush all the memory mapped in the MMU */
156
157void kgsl_gpu_mem_flush(int op)
158{
159 struct drm_kgsl_gem_object *entry;
160
161 list_for_each_entry(entry, &kgsl_mem_list, list) {
162 kgsl_gem_mem_flush(&entry->memdesc, entry->type, op);
163 }
164
165 /* Takes care of WT/WC case.
166 * More useful when we go barrierless
167 */
168 dmb();
169}
170
171/* TODO:
172 * Add vsync wait */
173
174static int kgsl_drm_load(struct drm_device *dev, unsigned long flags)
175{
176 return 0;
177}
178
179static int kgsl_drm_unload(struct drm_device *dev)
180{
181 return 0;
182}
183
184struct kgsl_drm_device_priv {
185 struct kgsl_device *device[KGSL_DEVICE_MAX];
186 struct kgsl_device_private *devpriv[KGSL_DEVICE_MAX];
187};
188
189static int kgsl_ts_notifier_cb(struct notifier_block *blk,
190 unsigned long code, void *_param);
191
192static struct notifier_block kgsl_ts_nb[KGSL_DEVICE_MAX];
193
194static int kgsl_drm_firstopen(struct drm_device *dev)
195{
196 int i;
197
198 for (i = 0; i < KGSL_DEVICE_MAX; i++) {
199 struct kgsl_device *device = kgsl_get_device(i);
200
201 if (device == NULL)
202 continue;
203
204 kgsl_ts_nb[i].notifier_call = kgsl_ts_notifier_cb;
205 kgsl_register_ts_notifier(device, &kgsl_ts_nb[i]);
206 }
207
208 return 0;
209}
210
211void kgsl_drm_lastclose(struct drm_device *dev)
212{
213 int i;
214
215 for (i = 0; i < KGSL_DEVICE_MAX; i++) {
216 struct kgsl_device *device = kgsl_get_device(i);
217 if (device == NULL)
218 continue;
219
220 kgsl_unregister_ts_notifier(device, &kgsl_ts_nb[i]);
221 }
222}
223
224void kgsl_drm_preclose(struct drm_device *dev, struct drm_file *file_priv)
225{
226}
227
228static int kgsl_drm_suspend(struct drm_device *dev, pm_message_t state)
229{
230 return 0;
231}
232
233static int kgsl_drm_resume(struct drm_device *dev)
234{
235 return 0;
236}
237
238static void
239kgsl_gem_free_mmap_offset(struct drm_gem_object *obj)
240{
241 struct drm_device *dev = obj->dev;
242 struct drm_gem_mm *mm = dev->mm_private;
243 struct drm_kgsl_gem_object *priv = obj->driver_private;
244 struct drm_map_list *list;
245
246 list = &obj->map_list;
247 drm_ht_remove_item(&mm->offset_hash, &list->hash);
248 if (list->file_offset_node) {
249 drm_mm_put_block(list->file_offset_node);
250 list->file_offset_node = NULL;
251 }
252
253 kfree(list->map);
254 list->map = NULL;
255
256 priv->mmap_offset = 0;
257}
258
259static int
260kgsl_gem_memory_allocated(struct drm_gem_object *obj)
261{
262 struct drm_kgsl_gem_object *priv = obj->driver_private;
263 return priv->memdesc.size ? 1 : 0;
264}
265
266static int
267kgsl_gem_alloc_memory(struct drm_gem_object *obj)
268{
269 struct drm_kgsl_gem_object *priv = obj->driver_private;
270 int index;
271
272 /* Return if the memory is already allocated */
273
274 if (kgsl_gem_memory_allocated(obj) || TYPE_IS_FD(priv->type))
275 return 0;
276
277 if (TYPE_IS_PMEM(priv->type)) {
278 int type;
279
280 if (priv->type == DRM_KGSL_GEM_TYPE_EBI ||
281 priv->type & DRM_KGSL_GEM_PMEM_EBI)
282 type = PMEM_MEMTYPE_EBI1;
283 else
284 type = PMEM_MEMTYPE_SMI;
285
286 priv->memdesc.physaddr =
287 pmem_kalloc(obj->size * priv->bufcount,
288 type | PMEM_ALIGNMENT_4K);
289
290 if (IS_ERR((void *) priv->memdesc.physaddr)) {
291 DRM_ERROR("Unable to allocate PMEM memory\n");
292 return -ENOMEM;
293 }
294
295 priv->memdesc.size = obj->size * priv->bufcount;
 296
297 } else if (TYPE_IS_MEM(priv->type)) {
298 priv->memdesc.hostptr =
299 vmalloc_user(obj->size * priv->bufcount);
300
301 if (priv->memdesc.hostptr == NULL) {
302 DRM_ERROR("Unable to allocate vmalloc memory\n");
303 return -ENOMEM;
304 }
305
306 priv->memdesc.size = obj->size * priv->bufcount;
307 priv->memdesc.ops = &kgsl_vmalloc_ops;
308 } else
309 return -EINVAL;
310
311 for (index = 0; index < priv->bufcount; index++)
312 priv->bufs[index].offset = index * obj->size;
313
314
315 return 0;
316}
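
/* All bufcount sub-buffers share a single allocation of
 * obj->size * priv->bufcount bytes; bufs[i].offset records i * obj->size so
 * that GET_BUFINFO and SET_ACTIVE can address the individual buffers later.
 */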
317
 318static void
319kgsl_gem_unmap(struct drm_gem_object *obj)
320{
321 struct drm_kgsl_gem_object *priv = obj->driver_private;
322
 323 if (!(priv->flags & DRM_KGSL_GEM_FLAG_MAPPED))
324 return;
325
326 kgsl_mmu_unmap(priv->pagetable, &priv->memdesc);
327
328 kgsl_mmu_putpagetable(priv->pagetable);
329 priv->pagetable = NULL;
330
331 if ((priv->type == DRM_KGSL_GEM_TYPE_KMEM) ||
332 (priv->type & DRM_KGSL_GEM_CACHE_MASK))
333 list_del(&priv->list);
334
335 priv->flags &= ~DRM_KGSL_GEM_FLAG_MAPPED;
336}
 337
338static void
339kgsl_gem_free_memory(struct drm_gem_object *obj)
340{
341 struct drm_kgsl_gem_object *priv = obj->driver_private;
342
343 if (!kgsl_gem_memory_allocated(obj) || TYPE_IS_FD(priv->type))
344 return;
345
346 kgsl_gem_mem_flush(&priv->memdesc, priv->type,
347 DRM_KGSL_GEM_CACHE_OP_FROM_DEV);
348
349 kgsl_gem_unmap(obj);
350
351 if (TYPE_IS_PMEM(priv->type))
352 pmem_kfree(priv->memdesc.physaddr);
353
354 kgsl_sharedmem_free(&priv->memdesc);
355}
356
357int
358kgsl_gem_init_object(struct drm_gem_object *obj)
359{
360 struct drm_kgsl_gem_object *priv;
361 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
362 if (priv == NULL) {
363 DRM_ERROR("Unable to create GEM object\n");
364 return -ENOMEM;
365 }
366
367 obj->driver_private = priv;
368 priv->obj = obj;
369
370 return 0;
371}
372
373void
374kgsl_gem_free_object(struct drm_gem_object *obj)
375{
376 kgsl_gem_free_memory(obj);
377 kgsl_gem_free_mmap_offset(obj);
378 drm_gem_object_release(obj);
379 kfree(obj->driver_private);
380}
381
382static int
383kgsl_gem_create_mmap_offset(struct drm_gem_object *obj)
384{
385 struct drm_device *dev = obj->dev;
386 struct drm_gem_mm *mm = dev->mm_private;
387 struct drm_kgsl_gem_object *priv = obj->driver_private;
388 struct drm_map_list *list;
389 int msize;
390
391 list = &obj->map_list;
392 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
393 if (list->map == NULL) {
394 DRM_ERROR("Unable to allocate drm_map_list\n");
395 return -ENOMEM;
396 }
397
398 msize = obj->size * priv->bufcount;
399
400 list->map->type = _DRM_GEM;
401 list->map->size = msize;
402 list->map->handle = obj;
403
404 /* Allocate a mmap offset */
405 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
406 msize / PAGE_SIZE,
407 0, 0);
408
409 if (!list->file_offset_node) {
410 DRM_ERROR("Failed to allocate offset for %d\n", obj->name);
411 kfree(list->map);
412 return -ENOMEM;
413 }
414
415 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
416 msize / PAGE_SIZE, 0);
417
418 if (!list->file_offset_node) {
419 DRM_ERROR("Unable to create the file_offset_node\n");
420 kfree(list->map);
421 return -ENOMEM;
422 }
423
424 list->hash.key = list->file_offset_node->start;
425 if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
426 DRM_ERROR("Failed to add to map hash\n");
427 drm_mm_put_block(list->file_offset_node);
428 kfree(list->map);
429 return -ENOMEM;
430 }
431
432 priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
433
434 return 0;
435}
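
/* The offset handed back to userspace is not a real file offset; it is the
 * map hash key shifted by PAGE_SHIFT, which msm_drm_gem_mmap() later uses
 * (via vma->vm_pgoff) to look this object back up when a mapping request
 * arrives with that offset.
 */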
436
437int
438kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start,
439 unsigned long *len)
440{
441 struct file *filp;
442 struct drm_device *dev;
443 struct drm_file *file_priv;
444 struct drm_gem_object *obj;
445 struct drm_kgsl_gem_object *priv;
446 int ret = 0;
447
448 filp = fget(drm_fd);
449 if (unlikely(filp == NULL)) {
 450 DRM_ERROR("Unable to get the DRM file descriptor\n");
451 return -EINVAL;
452 }
453 file_priv = filp->private_data;
454 if (unlikely(file_priv == NULL)) {
455 DRM_ERROR("Unable to get the file private data\n");
456 fput(filp);
457 return -EINVAL;
458 }
459 dev = file_priv->minor->dev;
460 if (unlikely(dev == NULL)) {
461 DRM_ERROR("Unable to get the minor device\n");
462 fput(filp);
463 return -EINVAL;
464 }
465
466 obj = drm_gem_object_lookup(dev, file_priv, handle);
467 if (unlikely(obj == NULL)) {
468 DRM_ERROR("Invalid GEM handle %x\n", handle);
469 fput(filp);
470 return -EBADF;
471 }
472
473 mutex_lock(&dev->struct_mutex);
474 priv = obj->driver_private;
475
476 /* We can only use the MDP for PMEM regions */
477
478 if (TYPE_IS_PMEM(priv->type)) {
479 *start = priv->memdesc.physaddr +
480 priv->bufs[priv->active].offset;
481
482 *len = priv->memdesc.size;
483
484 kgsl_gem_mem_flush(&priv->memdesc,
485 priv->type, DRM_KGSL_GEM_CACHE_OP_TO_DEV);
486 } else {
487 *start = 0;
488 *len = 0;
489 ret = -EINVAL;
490 }
491
492 drm_gem_object_unreference(obj);
493 mutex_unlock(&dev->struct_mutex);
494
495 fput(filp);
496 return ret;
497}
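
/* kgsl_gem_obj_addr() is exported for other kernel code (such as the MDP
 * display path mentioned above) that only has a DRM fd and a GEM handle and
 * needs the physical address of the active buffer.  A caller sketch
 * (hypothetical; setup_scanout() is an invented placeholder):
 *
 *	unsigned long start, len;
 *
 *	if (!kgsl_gem_obj_addr(drm_fd, handle, &start, &len))
 *		setup_scanout(start, len);
 */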
498
499static int
500kgsl_gem_init_obj(struct drm_device *dev,
501 struct drm_file *file_priv,
502 struct drm_gem_object *obj,
503 int *handle)
504{
505 struct drm_kgsl_gem_object *priv;
506 int ret, i;
507
508 mutex_lock(&dev->struct_mutex);
509 priv = obj->driver_private;
510
511 memset(&priv->memdesc, 0, sizeof(priv->memdesc));
512 priv->bufcount = 1;
513 priv->active = 0;
514 priv->bound = 0;
515
 516 /* To preserve backwards compatibility, the default memory source
517 is EBI */
518
519 priv->type = DRM_KGSL_GEM_TYPE_PMEM | DRM_KGSL_GEM_PMEM_EBI;
520
521 ret = drm_gem_handle_create(file_priv, obj, handle);
522
523 drm_gem_object_handle_unreference(obj);
524 INIT_LIST_HEAD(&priv->wait_list);
525
526 for (i = 0; i < DRM_KGSL_HANDLE_WAIT_ENTRIES; i++) {
527 INIT_LIST_HEAD((struct list_head *) &priv->wait_entries[i]);
528 priv->wait_entries[i].pid = 0;
529 init_waitqueue_head(&priv->wait_entries[i].process_wait_q);
530 }
531
532 for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
533 INIT_LIST_HEAD((struct list_head *) &priv->fence_entries[i]);
534 priv->fence_entries[i].in_use = 0;
535 priv->fence_entries[i].gem_obj = obj;
536 }
537
538 mutex_unlock(&dev->struct_mutex);
539 return ret;
540}
541
542int
543kgsl_gem_create_ioctl(struct drm_device *dev, void *data,
544 struct drm_file *file_priv)
545{
546 struct drm_kgsl_gem_create *create = data;
547 struct drm_gem_object *obj;
548 int ret, handle;
549
550 /* Page align the size so we can allocate multiple buffers */
551 create->size = ALIGN(create->size, 4096);
552
553 obj = drm_gem_object_alloc(dev, create->size);
554
555 if (obj == NULL) {
556 DRM_ERROR("Unable to allocate the GEM object\n");
557 return -ENOMEM;
558 }
559
560 ret = kgsl_gem_init_obj(dev, file_priv, obj, &handle);
561 if (ret)
562 return ret;
563
564 create->handle = handle;
565 return 0;
566}
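
/* Typical userspace flow (a sketch, assuming libdrm's drmCommandWriteRead()
 * and the request/struct definitions from kgsl_drm.h; error handling
 * omitted):
 *
 *	struct drm_kgsl_gem_create create = { .size = len };
 *
 *	drmCommandWriteRead(fd, DRM_KGSL_GEM_CREATE, &create, sizeof(create));
 *	// create.handle now names the new object; follow up with SETMEMTYPE,
 *	// ALLOC, MMAP and/or BIND_GPU as needed.
 */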
567
568int
569kgsl_gem_create_fd_ioctl(struct drm_device *dev, void *data,
570 struct drm_file *file_priv)
571{
572 struct drm_kgsl_gem_create_fd *args = data;
573 struct file *file;
574 dev_t rdev;
575 struct fb_info *info;
576 struct drm_gem_object *obj;
577 struct drm_kgsl_gem_object *priv;
578 int ret, put_needed, handle;
579
580 file = fget_light(args->fd, &put_needed);
581
582 if (file == NULL) {
583 DRM_ERROR("Unable to get the file object\n");
584 return -EBADF;
585 }
586
587 rdev = file->f_dentry->d_inode->i_rdev;
588
589 /* Only framebuffer objects are supported ATM */
590
591 if (MAJOR(rdev) != FB_MAJOR) {
592 DRM_ERROR("File descriptor is not a framebuffer\n");
593 ret = -EBADF;
594 goto error_fput;
595 }
596
597 info = registered_fb[MINOR(rdev)];
598
599 if (info == NULL) {
600 DRM_ERROR("Framebuffer minor %d is not registered\n",
601 MINOR(rdev));
602 ret = -EBADF;
603 goto error_fput;
604 }
605
606 obj = drm_gem_object_alloc(dev, info->fix.smem_len);
607
608 if (obj == NULL) {
609 DRM_ERROR("Unable to allocate GEM object\n");
610 ret = -ENOMEM;
611 goto error_fput;
612 }
613
614 ret = kgsl_gem_init_obj(dev, file_priv, obj, &handle);
615
616 if (ret)
617 goto error_fput;
618
619 mutex_lock(&dev->struct_mutex);
620
621 priv = obj->driver_private;
622 priv->memdesc.physaddr = info->fix.smem_start;
623 priv->type = DRM_KGSL_GEM_TYPE_FD_FBMEM;
624
625 mutex_unlock(&dev->struct_mutex);
626 args->handle = handle;
627
628error_fput:
629 fput_light(file, put_needed);
630
631 return ret;
632}
633
634int
635kgsl_gem_setmemtype_ioctl(struct drm_device *dev, void *data,
636 struct drm_file *file_priv)
637{
638 struct drm_kgsl_gem_memtype *args = data;
639 struct drm_gem_object *obj;
640 struct drm_kgsl_gem_object *priv;
641 int ret = 0;
642
643 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
644
645 if (obj == NULL) {
646 DRM_ERROR("Invalid GEM handle %x\n", args->handle);
647 return -EBADF;
648 }
649
650 mutex_lock(&dev->struct_mutex);
651 priv = obj->driver_private;
652
653 if (TYPE_IS_FD(priv->type))
654 ret = -EINVAL;
655 else {
656 if (TYPE_IS_PMEM(args->type) || TYPE_IS_MEM(args->type))
657 priv->type = args->type;
658 else
659 ret = -EINVAL;
660 }
661
662 drm_gem_object_unreference(obj);
663 mutex_unlock(&dev->struct_mutex);
664
665 return ret;
666}
667
668int
669kgsl_gem_getmemtype_ioctl(struct drm_device *dev, void *data,
670 struct drm_file *file_priv)
671{
672 struct drm_kgsl_gem_memtype *args = data;
673 struct drm_gem_object *obj;
674 struct drm_kgsl_gem_object *priv;
675
676 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
677
678 if (obj == NULL) {
679 DRM_ERROR("Invalid GEM handle %x\n", args->handle);
680 return -EBADF;
681 }
682
683 mutex_lock(&dev->struct_mutex);
684 priv = obj->driver_private;
685
686 args->type = priv->type;
687
688 drm_gem_object_unreference(obj);
689 mutex_unlock(&dev->struct_mutex);
690
691 return 0;
692}
693
694int
695kgsl_gem_unbind_gpu_ioctl(struct drm_device *dev, void *data,
696 struct drm_file *file_priv)
697{
698 struct drm_kgsl_gem_bind_gpu *args = data;
699 struct drm_gem_object *obj;
700 struct drm_kgsl_gem_object *priv;
701
702 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
703
704 if (obj == NULL) {
705 DRM_ERROR("Invalid GEM handle %x\n", args->handle);
706 return -EBADF;
707 }
708
709 mutex_lock(&dev->struct_mutex);
710 priv = obj->driver_private;
711
712 if (--priv->bound == 0)
713 kgsl_gem_unmap(obj);
714
715 drm_gem_object_unreference(obj);
716 mutex_unlock(&dev->struct_mutex);
717 return 0;
718}
719
 720static int
721kgsl_gem_map(struct drm_gem_object *obj)
722{
723 struct drm_kgsl_gem_object *priv = obj->driver_private;
724 int index;
725 int ret = -EINVAL;
726
727 if (priv->flags & DRM_KGSL_GEM_FLAG_MAPPED)
728 return 0;
729
730 /* Get the global page table */
731
732 if (priv->pagetable == NULL) {
733 priv->pagetable = kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
734
735 if (priv->pagetable == NULL) {
736 DRM_ERROR("Unable to get the GPU MMU pagetable\n");
737 return -EINVAL;
738 }
739 }
740
741 priv->memdesc.pagetable = priv->pagetable;
742
743 ret = kgsl_mmu_map(priv->pagetable, &priv->memdesc,
744 GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
745
746 if (!ret) {
747 for (index = 0; index < priv->bufcount; index++) {
748 priv->bufs[index].gpuaddr =
749 priv->memdesc.gpuaddr +
750 priv->bufs[index].offset;
751 }
752 }
753
754 /* Add cached memory to the list to be cached */
755
756 if (priv->type == DRM_KGSL_GEM_TYPE_KMEM ||
757 priv->type & DRM_KGSL_GEM_CACHE_MASK)
758 list_add(&priv->list, &kgsl_mem_list);
759
760 priv->flags |= DRM_KGSL_GEM_FLAG_MAPPED;
761
762 return ret;
763}
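
/* Every object is mapped through KGSL_MMU_GLOBAL_PT, the shared global
 * pagetable, rather than a per-process pagetable, so once an object is
 * bound its GPU address is the same for every client that uses it.
 */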
 764
765int
766kgsl_gem_bind_gpu_ioctl(struct drm_device *dev, void *data,
767 struct drm_file *file_priv)
768{
769 struct drm_kgsl_gem_bind_gpu *args = data;
770 struct drm_gem_object *obj;
771 struct drm_kgsl_gem_object *priv;
772 int ret = 0;
773
774 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
775
776 if (obj == NULL) {
777 DRM_ERROR("Invalid GEM handle %x\n", args->handle);
778 return -EBADF;
779 }
780
781 mutex_lock(&dev->struct_mutex);
782 priv = obj->driver_private;
783
784 if (priv->bound++ == 0) {
785
786 if (!kgsl_gem_memory_allocated(obj)) {
787 DRM_ERROR("Memory not allocated for this object\n");
788 ret = -ENOMEM;
789 goto out;
790 }
791
792 ret = kgsl_gem_map(obj);
793
794 /* This is legacy behavior - use GET_BUFFERINFO instead */
795 args->gpuptr = priv->bufs[0].gpuaddr;
796 }
797out:
798 drm_gem_object_unreference(obj);
799 mutex_unlock(&dev->struct_mutex);
800 return ret;
801}
802
803/* Allocate the memory and prepare it for CPU mapping */
804
805int
806kgsl_gem_alloc_ioctl(struct drm_device *dev, void *data,
807 struct drm_file *file_priv)
808{
809 struct drm_kgsl_gem_alloc *args = data;
810 struct drm_gem_object *obj;
811 struct drm_kgsl_gem_object *priv;
812 int ret;
813
814 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
815
816 if (obj == NULL) {
817 DRM_ERROR("Invalid GEM handle %x\n", args->handle);
818 return -EBADF;
819 }
820
821 mutex_lock(&dev->struct_mutex);
822 priv = obj->driver_private;
823
824 ret = kgsl_gem_alloc_memory(obj);
825
826 if (ret) {
827 DRM_ERROR("Unable to allocate object memory\n");
828 } else if (!priv->mmap_offset) {
829 ret = kgsl_gem_create_mmap_offset(obj);
830 if (ret)
831 DRM_ERROR("Unable to create a mmap offset\n");
832 }
833
834 args->offset = priv->mmap_offset;
835
836 drm_gem_object_unreference(obj);
837 mutex_unlock(&dev->struct_mutex);
838
839 return ret;
840}
841
842int
843kgsl_gem_mmap_ioctl(struct drm_device *dev, void *data,
844 struct drm_file *file_priv)
845{
846 struct drm_kgsl_gem_mmap *args = data;
847 struct drm_gem_object *obj;
848 unsigned long addr;
849
850 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
851
852 if (obj == NULL) {
853 DRM_ERROR("Invalid GEM handle %x\n", args->handle);
854 return -EBADF;
855 }
856
857 down_write(&current->mm->mmap_sem);
858
859 addr = do_mmap(obj->filp, 0, args->size,
860 PROT_READ | PROT_WRITE, MAP_SHARED,
861 args->offset);
862
863 up_write(&current->mm->mmap_sem);
864
865 mutex_lock(&dev->struct_mutex);
866 drm_gem_object_unreference(obj);
867 mutex_unlock(&dev->struct_mutex);
868
869 if (IS_ERR((void *) addr))
870 return addr;
871
872 args->hostptr = (uint32_t) addr;
873 return 0;
874}
875
876/* This function is deprecated */
877
878int
879kgsl_gem_prep_ioctl(struct drm_device *dev, void *data,
880 struct drm_file *file_priv)
881{
882 struct drm_kgsl_gem_prep *args = data;
883 struct drm_gem_object *obj;
884 struct drm_kgsl_gem_object *priv;
885 int ret;
886
887 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
888
889 if (obj == NULL) {
890 DRM_ERROR("Invalid GEM handle %x\n", args->handle);
891 return -EBADF;
892 }
893
894 mutex_lock(&dev->struct_mutex);
895 priv = obj->driver_private;
896
897 ret = kgsl_gem_alloc_memory(obj);
898 if (ret) {
899 DRM_ERROR("Unable to allocate object memory\n");
900 drm_gem_object_unreference(obj);
901 mutex_unlock(&dev->struct_mutex);
902 return ret;
903 }
904
905 if (priv->mmap_offset == 0) {
906 ret = kgsl_gem_create_mmap_offset(obj);
907 if (ret) {
908 drm_gem_object_unreference(obj);
909 mutex_unlock(&dev->struct_mutex);
910 return ret;
911 }
912 }
913
914 args->offset = priv->mmap_offset;
915 args->phys = priv->memdesc.physaddr;
916
917 drm_gem_object_unreference(obj);
918 mutex_unlock(&dev->struct_mutex);
919
920 return 0;
921}
922
923int
924kgsl_gem_get_bufinfo_ioctl(struct drm_device *dev, void *data,
925 struct drm_file *file_priv)
926{
927 struct drm_kgsl_gem_bufinfo *args = data;
928 struct drm_gem_object *obj;
929 struct drm_kgsl_gem_object *priv;
930 int ret = -EINVAL;
931 int index;
932
933 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
934
935 if (obj == NULL) {
936 DRM_ERROR("Invalid GEM handle %x\n", args->handle);
937 return -EBADF;
938 }
939
940 mutex_lock(&dev->struct_mutex);
941 priv = obj->driver_private;
942
943 if (!kgsl_gem_memory_allocated(obj)) {
944 DRM_ERROR("Memory not allocated for this object\n");
945 goto out;
946 }
947
948 for (index = 0; index < priv->bufcount; index++) {
949 args->offset[index] = priv->bufs[index].offset;
950 args->gpuaddr[index] = priv->bufs[index].gpuaddr;
951 }
952
953 args->count = priv->bufcount;
954 args->active = priv->active;
955
956 ret = 0;
957
958out:
959 drm_gem_object_unreference(obj);
960 mutex_unlock(&dev->struct_mutex);
961
962 return ret;
963}
964
965int
966kgsl_gem_set_bufcount_ioctl(struct drm_device *dev, void *data,
967 struct drm_file *file_priv)
968{
969 struct drm_kgsl_gem_bufcount *args = data;
970 struct drm_gem_object *obj;
971 struct drm_kgsl_gem_object *priv;
972 int ret = -EINVAL;
973
974 if (args->bufcount < 1 || args->bufcount > DRM_KGSL_GEM_MAX_BUFFERS)
975 return -EINVAL;
976
977 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
978
979 if (obj == NULL) {
980 DRM_ERROR("Invalid GEM handle %x\n", args->handle);
981 return -EBADF;
982 }
983
984 mutex_lock(&dev->struct_mutex);
985 priv = obj->driver_private;
986
987 /* It is too much math to worry about what happens if we are already
988 allocated, so just bail if we are */
989
990 if (kgsl_gem_memory_allocated(obj)) {
 991 DRM_ERROR("Memory already allocated - cannot change "
 992 "number of buffers\n");
993 goto out;
994 }
995
996 priv->bufcount = args->bufcount;
997 ret = 0;
998
999out:
1000 drm_gem_object_unreference(obj);
1001 mutex_unlock(&dev->struct_mutex);
1002
1003 return ret;
1004}
1005
1006int
1007kgsl_gem_set_active_ioctl(struct drm_device *dev, void *data,
1008 struct drm_file *file_priv)
1009{
1010 struct drm_kgsl_gem_active *args = data;
1011 struct drm_gem_object *obj;
1012 struct drm_kgsl_gem_object *priv;
1013 int ret = -EINVAL;
1014
1015 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1016
1017 if (obj == NULL) {
1018 DRM_ERROR("Invalid GEM handle %x\n", args->handle);
1019 return -EBADF;
1020 }
1021
1022 mutex_lock(&dev->struct_mutex);
1023 priv = obj->driver_private;
1024
1025 if (args->active < 0 || args->active >= priv->bufcount) {
1026 DRM_ERROR("Invalid active buffer %d\n", args->active);
1027 goto out;
1028 }
1029
1030 priv->active = args->active;
1031 ret = 0;
1032
1033out:
1034 drm_gem_object_unreference(obj);
1035 mutex_unlock(&dev->struct_mutex);
1036
1037 return ret;
1038}
1039
1040int kgsl_gem_kmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1041{
1042 struct drm_gem_object *obj = vma->vm_private_data;
1043 struct drm_device *dev = obj->dev;
1044 struct drm_kgsl_gem_object *priv;
1045 unsigned long offset, pg;
1046 struct page *page;
1047
1048 mutex_lock(&dev->struct_mutex);
1049
1050 priv = obj->driver_private;
1051
1052 offset = (unsigned long) vmf->virtual_address - vma->vm_start;
1053 pg = (unsigned long) priv->memdesc.hostptr + offset;
1054
1055 page = vmalloc_to_page((void *) pg);
1056 if (!page) {
1057 mutex_unlock(&dev->struct_mutex);
1058 return VM_FAULT_SIGBUS;
1059 }
1060
1061 get_page(page);
1062 vmf->page = page;
1063
1064 mutex_unlock(&dev->struct_mutex);
1065 return 0;
1066}
1067
1068int kgsl_gem_phys_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1069{
1070 struct drm_gem_object *obj = vma->vm_private_data;
1071 struct drm_device *dev = obj->dev;
1072 struct drm_kgsl_gem_object *priv;
1073 unsigned long offset, pfn;
1074 int ret = 0;
1075
1076 offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >>
1077 PAGE_SHIFT;
1078
1079 mutex_lock(&dev->struct_mutex);
1080
1081 priv = obj->driver_private;
1082
1083 pfn = (priv->memdesc.physaddr >> PAGE_SHIFT) + offset;
1084 ret = vm_insert_pfn(vma,
1085 (unsigned long) vmf->virtual_address, pfn);
1086 mutex_unlock(&dev->struct_mutex);
1087
1088 switch (ret) {
1089 case -ENOMEM:
1090 case -EAGAIN:
1091 return VM_FAULT_OOM;
1092 case -EFAULT:
1093 return VM_FAULT_SIGBUS;
1094 default:
1095 return VM_FAULT_NOPAGE;
1096 }
1097}
1098
1099static struct vm_operations_struct kgsl_gem_kmem_vm_ops = {
1100 .fault = kgsl_gem_kmem_fault,
1101 .open = drm_gem_vm_open,
1102 .close = drm_gem_vm_close,
1103};
1104
1105static struct vm_operations_struct kgsl_gem_phys_vm_ops = {
1106 .fault = kgsl_gem_phys_fault,
1107 .open = drm_gem_vm_open,
1108 .close = drm_gem_vm_close,
1109};
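
/* kgsl_gem_kmem_vm_ops services vmalloc-backed KMEM objects, whose faults
 * resolve to real struct pages, while kgsl_gem_phys_vm_ops services
 * PMEM/FBMEM objects by inserting raw PFNs; msm_drm_gem_mmap() below picks
 * between them based on TYPE_IS_MEM().
 */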
1110
1111/* This is a clone of the standard drm_gem_mmap function modified to allow
1112 us to properly map KMEM regions as well as the PMEM regions */
1113
1114int msm_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
1115{
1116 struct drm_file *priv = filp->private_data;
1117 struct drm_device *dev = priv->minor->dev;
1118 struct drm_gem_mm *mm = dev->mm_private;
1119 struct drm_local_map *map = NULL;
1120 struct drm_gem_object *obj;
1121 struct drm_hash_item *hash;
1122 struct drm_kgsl_gem_object *gpriv;
1123 int ret = 0;
1124
1125 mutex_lock(&dev->struct_mutex);
1126
1127 if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
1128 mutex_unlock(&dev->struct_mutex);
1129 return drm_mmap(filp, vma);
1130 }
1131
1132 map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
1133 if (!map ||
1134 ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
1135 ret = -EPERM;
1136 goto out_unlock;
1137 }
1138
1139 /* Check for valid size. */
1140 if (map->size < vma->vm_end - vma->vm_start) {
1141 ret = -EINVAL;
1142 goto out_unlock;
1143 }
1144
1145 obj = map->handle;
1146
1147 gpriv = obj->driver_private;
1148
1149 /* VM_PFNMAP is only for memory that doesn't use struct page
1150 * in other words, not "normal" memory. If you try to use it
1151 * with "normal" memory then the mappings don't get flushed. */
1152
1153 if (TYPE_IS_MEM(gpriv->type)) {
1154 vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
1155 vma->vm_ops = &kgsl_gem_kmem_vm_ops;
1156 } else {
1157 vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP |
1158 VM_DONTEXPAND;
1159 vma->vm_ops = &kgsl_gem_phys_vm_ops;
1160 }
1161
1162 vma->vm_private_data = map->handle;
1163
1164
1165 /* Take care of requested caching policy */
1166 if (gpriv->type == DRM_KGSL_GEM_TYPE_KMEM ||
1167 gpriv->type & DRM_KGSL_GEM_CACHE_MASK) {
1168 if (gpriv->type & DRM_KGSL_GEM_CACHE_WBACKWA)
1169 vma->vm_page_prot =
1170 pgprot_writebackwacache(vma->vm_page_prot);
1171 else if (gpriv->type & DRM_KGSL_GEM_CACHE_WBACK)
1172 vma->vm_page_prot =
1173 pgprot_writebackcache(vma->vm_page_prot);
1174 else if (gpriv->type & DRM_KGSL_GEM_CACHE_WTHROUGH)
1175 vma->vm_page_prot =
1176 pgprot_writethroughcache(vma->vm_page_prot);
1177 else
1178 vma->vm_page_prot =
1179 pgprot_writecombine(vma->vm_page_prot);
1180 } else {
1181 if (gpriv->type == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE)
1182 vma->vm_page_prot =
1183 pgprot_noncached(vma->vm_page_prot);
1184 else
1185 /* default pmem is WC */
1186 vma->vm_page_prot =
1187 pgprot_writecombine(vma->vm_page_prot);
1188 }
1189
1190 /* flush out existing KMEM cached mappings if new ones are
1191 * of uncached type */
1192 if (IS_MEM_UNCACHED(gpriv->type))
1193 kgsl_cache_range_op(&gpriv->memdesc,
1194 KGSL_CACHE_OP_FLUSH);
1195
1196 /* Add the other memory types here */
1197
1198 /* Take a ref for this mapping of the object, so that the fault
1199 * handler can dereference the mmap offset's pointer to the object.
1200 * This reference is cleaned up by the corresponding vm_close
1201 * (which should happen whether the vma was created by this call, or
1202 * by a vm_open due to mremap or partial unmap or whatever).
1203 */
1204 drm_gem_object_reference(obj);
1205
1206 vma->vm_file = filp; /* Needed for drm_vm_open() */
1207 drm_vm_open_locked(vma);
1208
1209out_unlock:
1210 mutex_unlock(&dev->struct_mutex);
1211
1212 return ret;
1213}
1214
1215void
1216cleanup_fence(struct drm_kgsl_gem_object_fence *fence, int check_waiting)
1217{
1218 int j;
1219 struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
1220 struct drm_kgsl_gem_object *unlock_obj;
1221 struct drm_gem_object *obj;
1222 struct drm_kgsl_gem_object_wait_list_entry *lock_next;
1223
1224 fence->ts_valid = 0;
1225 fence->timestamp = -1;
1226 fence->ts_device = -1;
1227
1228 /* Walk the list of buffers in this fence and clean up the */
1229 /* references. Note that this can cause memory allocations */
1230 /* to be freed */
1231 for (j = fence->num_buffers; j > 0; j--) {
1232 this_fence_entry =
1233 (struct drm_kgsl_gem_object_fence_list_entry *)
1234 fence->buffers_in_fence.prev;
1235
1236 this_fence_entry->in_use = 0;
1237 obj = this_fence_entry->gem_obj;
1238 unlock_obj = obj->driver_private;
1239
1240 /* Delete it from the list */
1241
1242 list_del(&this_fence_entry->list);
1243
1244 /* we are unlocking - see if there are other pids waiting */
1245 if (check_waiting) {
1246 if (!list_empty(&unlock_obj->wait_list)) {
1247 lock_next =
1248 (struct drm_kgsl_gem_object_wait_list_entry *)
1249 unlock_obj->wait_list.prev;
1250
1251 list_del((struct list_head *)&lock_next->list);
1252
1253 unlock_obj->lockpid = 0;
1254 wake_up_interruptible(
1255 &lock_next->process_wait_q);
1256 lock_next->pid = 0;
1257
1258 } else {
1259 /* List is empty so set pid to 0 */
1260 unlock_obj->lockpid = 0;
1261 }
1262 }
1263
1264 drm_gem_object_unreference(obj);
1265 }
1266 /* here all the buffers in the fence are released */
1267 /* clear the fence entry */
1268 fence->fence_id = ENTRY_EMPTY;
1269}
1270
1271int
1272find_empty_fence(void)
1273{
1274 int i;
1275
1276 for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
1277 if (gem_buf_fence[i].fence_id == ENTRY_EMPTY) {
1278 gem_buf_fence[i].fence_id = fence_id++;
1279 gem_buf_fence[i].ts_valid = 0;
1280 INIT_LIST_HEAD(&(gem_buf_fence[i].buffers_in_fence));
1281 if (fence_id == 0xFFFFFFF0)
1282 fence_id = 1;
1283 return i;
1284 } else {
1285
1286 /* Look for entries to be cleaned up */
1287 if (gem_buf_fence[i].fence_id == ENTRY_NEEDS_CLEANUP)
1288 cleanup_fence(&gem_buf_fence[i], 0);
1289 }
1290 }
1291
1292 return ENTRY_EMPTY;
1293}
1294
1295int
1296find_fence(int index)
1297{
1298 int i;
1299
1300 for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
1301 if (gem_buf_fence[i].fence_id == index)
1302 return i;
1303 }
1304
1305 return ENTRY_EMPTY;
1306}
1307
1308void
1309wakeup_fence_entries(struct drm_kgsl_gem_object_fence *fence)
1310{
1311 struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
1312 struct drm_kgsl_gem_object_wait_list_entry *lock_next;
1313 struct drm_kgsl_gem_object *unlock_obj;
1314 struct drm_gem_object *obj;
1315
1316 /* TS has expired when we get here */
1317 fence->ts_valid = 0;
1318 fence->timestamp = -1;
1319 fence->ts_device = -1;
1320
1321 list_for_each_entry(this_fence_entry, &fence->buffers_in_fence, list) {
1322 obj = this_fence_entry->gem_obj;
1323 unlock_obj = obj->driver_private;
1324
1325 if (!list_empty(&unlock_obj->wait_list)) {
1326 lock_next =
1327 (struct drm_kgsl_gem_object_wait_list_entry *)
1328 unlock_obj->wait_list.prev;
1329
1330 /* Unblock the pid */
1331 lock_next->pid = 0;
1332
1333 /* Delete it from the list */
1334 list_del((struct list_head *)&lock_next->list);
1335
1336 unlock_obj->lockpid = 0;
1337 wake_up_interruptible(&lock_next->process_wait_q);
1338
1339 } else {
1340 /* List is empty so set pid to 0 */
1341 unlock_obj->lockpid = 0;
1342 }
1343 }
1344 fence->fence_id = ENTRY_NEEDS_CLEANUP; /* Mark it as needing cleanup */
1345}
1346
1347static int kgsl_ts_notifier_cb(struct notifier_block *blk,
1348 unsigned long code, void *_param)
1349{
1350 struct drm_kgsl_gem_object_fence *fence;
1351 struct kgsl_device *device = kgsl_get_device(code);
1352 int i;
1353
1354 /* loop through the fences to see what things can be processed */
1355
1356 for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
1357 fence = &gem_buf_fence[i];
1358 if (!fence->ts_valid || fence->ts_device != code)
1359 continue;
1360
1361 if (kgsl_check_timestamp(device, fence->timestamp))
1362 wakeup_fence_entries(fence);
1363 }
1364
1365 return 0;
1366}
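
/* The notifier above is registered once per KGSL device in
 * kgsl_drm_firstopen().  When a device signals a timestamp event, every
 * fence waiting on that device whose timestamp has been reached has its
 * waiters woken and is marked ENTRY_NEEDS_CLEANUP, to be reclaimed lazily
 * by find_empty_fence().
 */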
1367
1368int
1369kgsl_gem_lock_handle_ioctl(struct drm_device *dev, void *data,
1370 struct drm_file *file_priv)
1371{
1372 /* The purpose of this function is to lock a given set of handles. */
1373 /* The driver will maintain a list of locked handles. */
1374 /* If a request comes in for a handle that's locked the thread will */
1375 /* block until it's no longer in use. */
1376
1377 struct drm_kgsl_gem_lock_handles *args = data;
1378 struct drm_gem_object *obj;
1379 struct drm_kgsl_gem_object *priv;
1380 struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
1381 struct drm_kgsl_gem_object_fence *fence;
1382 struct drm_kgsl_gem_object_wait_list_entry *lock_item;
1383 int i, j;
1384 int result = 0;
1385 uint32_t *lock_list;
1386 uint32_t *work_list = NULL;
1387 int32_t fence_index;
1388
1389 /* copy in the data from user space */
1390 lock_list = kzalloc(sizeof(uint32_t) * args->num_handles, GFP_KERNEL);
1391 if (!lock_list) {
 1392 DRM_ERROR("Unable to allocate memory for lock list\n");
1393 result = -ENOMEM;
1394 goto error;
1395 }
1396
1397 if (copy_from_user(lock_list, args->handle_list,
1398 sizeof(uint32_t) * args->num_handles)) {
1399 DRM_ERROR("Unable to copy the lock list from the user\n");
1400 result = -EFAULT;
1401 goto free_handle_list;
1402 }
1403
1404
1405 work_list = lock_list;
1406 mutex_lock(&dev->struct_mutex);
1407
1408 /* build the fence for this group of handles */
1409 fence_index = find_empty_fence();
1410 if (fence_index == ENTRY_EMPTY) {
 1411 DRM_ERROR("Unable to find an empty fence\n");
1412 args->lock_id = 0xDEADBEEF;
1413 result = -EFAULT;
1414 goto out_unlock;
1415 }
1416
1417 fence = &gem_buf_fence[fence_index];
1418 gem_buf_fence[fence_index].num_buffers = args->num_handles;
1419 args->lock_id = gem_buf_fence[fence_index].fence_id;
1420
1421 for (j = args->num_handles; j > 0; j--, lock_list++) {
1422 obj = drm_gem_object_lookup(dev, file_priv, *lock_list);
1423
1424 if (obj == NULL) {
1425 DRM_ERROR("Invalid GEM handle %x\n", *lock_list);
1426 result = -EBADF;
1427 goto out_unlock;
1428 }
1429
1430 priv = obj->driver_private;
1431 this_fence_entry = NULL;
1432
1433 /* get a fence entry to hook into the fence */
1434 for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
1435 if (!priv->fence_entries[i].in_use) {
1436 this_fence_entry = &priv->fence_entries[i];
1437 this_fence_entry->in_use = 1;
1438 break;
1439 }
1440 }
1441
1442 if (this_fence_entry == NULL) {
1443 fence->num_buffers = 0;
1444 fence->fence_id = ENTRY_EMPTY;
1445 args->lock_id = 0xDEADBEAD;
1446 result = -EFAULT;
1447 drm_gem_object_unreference(obj);
1448 goto out_unlock;
1449 }
1450
1451 /* We're trying to lock - add to a fence */
1452 list_add((struct list_head *)this_fence_entry,
1453 &gem_buf_fence[fence_index].buffers_in_fence);
1454 if (priv->lockpid) {
1455
1456 if (priv->lockpid == args->pid) {
1457 /* now that things are running async this */
1458 /* happens when an op isn't done */
1459 /* so it's already locked by the calling pid */
1460 continue;
1461 }
1462
1463
1464 /* if a pid already had it locked */
1465 /* create and add to wait list */
1466 for (i = 0; i < DRM_KGSL_HANDLE_WAIT_ENTRIES; i++) {
1467 if (priv->wait_entries[i].in_use == 0) {
1468 /* this one is empty */
1469 lock_item = &priv->wait_entries[i];
1470 lock_item->in_use = 1;
1471 lock_item->pid = args->pid;
1472 INIT_LIST_HEAD((struct list_head *)
1473 &priv->wait_entries[i]);
1474 break;
1475 }
1476 }
1477
1478 if (i == DRM_KGSL_HANDLE_WAIT_ENTRIES) {
1479
1480 result = -EFAULT;
1481 drm_gem_object_unreference(obj);
1482 goto out_unlock;
1483 }
1484
1485 list_add_tail((struct list_head *)&lock_item->list,
1486 &priv->wait_list);
1487 mutex_unlock(&dev->struct_mutex);
1488 /* here we need to block */
1489 wait_event_interruptible_timeout(
1490 priv->wait_entries[i].process_wait_q,
1491 (priv->lockpid == 0),
1492 msecs_to_jiffies(64));
1493 mutex_lock(&dev->struct_mutex);
1494 lock_item->in_use = 0;
1495 }
1496
1497 /* Getting here means no one currently holds the lock */
1498 priv->lockpid = args->pid;
1499
1500 args->lock_id = gem_buf_fence[fence_index].fence_id;
1501 }
1502 fence->lockpid = args->pid;
1503
1504out_unlock:
1505 mutex_unlock(&dev->struct_mutex);
1506
1507free_handle_list:
1508 kfree(work_list);
1509
1510error:
1511 return result;
1512}
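
/* Userspace locking flow (a sketch; request and field names are taken from
 * their uses above and from kgsl_drm.h, error handling omitted):
 *
 *	struct drm_kgsl_gem_lock_handles lock = {
 *		.num_handles = n,
 *		.handle_list = handles,
 *		.pid = getpid(),
 *	};
 *
 *	drmCommandWriteRead(fd, DRM_KGSL_GEM_LOCK_HANDLE, &lock, sizeof(lock));
 *	// ... queue rendering that consumes the buffers ...
 *	struct drm_kgsl_gem_unlock_on_ts unlock = {
 *		.lock_id = lock.lock_id,
 *		.timestamp = ts,
 *		.type = DRM_KGSL_GEM_TS_3D,
 *	};
 *
 *	drmCommandWrite(fd, DRM_KGSL_GEM_UNLOCK_ON_TS, &unlock, sizeof(unlock));
 */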
1513
1514int
1515kgsl_gem_unlock_handle_ioctl(struct drm_device *dev, void *data,
1516 struct drm_file *file_priv)
1517{
1518 struct drm_kgsl_gem_unlock_handles *args = data;
1519 int result = 0;
1520 int32_t fence_index;
1521
1522 mutex_lock(&dev->struct_mutex);
1523 fence_index = find_fence(args->lock_id);
1524 if (fence_index == ENTRY_EMPTY) {
1525 DRM_ERROR("Invalid lock ID: %x\n", args->lock_id);
1526 result = -EFAULT;
1527 goto out_unlock;
1528 }
1529
1530 cleanup_fence(&gem_buf_fence[fence_index], 1);
1531
1532out_unlock:
1533 mutex_unlock(&dev->struct_mutex);
1534
1535 return result;
1536}
1537
1538
1539int
1540kgsl_gem_unlock_on_ts_ioctl(struct drm_device *dev, void *data,
1541 struct drm_file *file_priv)
1542{
1543 struct drm_kgsl_gem_unlock_on_ts *args = data;
1544 int result = 0;
1545 int ts_done = 0;
1546 int32_t fence_index, ts_device;
1547 struct drm_kgsl_gem_object_fence *fence;
1548 struct kgsl_device *device;
1549
1550 if (args->type == DRM_KGSL_GEM_TS_3D)
1551 ts_device = KGSL_DEVICE_3D0;
1552 else if (args->type == DRM_KGSL_GEM_TS_2D)
1553 ts_device = KGSL_DEVICE_2D0;
1554 else {
1555 result = -EINVAL;
1556 goto error;
1557 }
1558
1559 device = kgsl_get_device(ts_device);
1560 ts_done = kgsl_check_timestamp(device, args->timestamp);
1561
1562 mutex_lock(&dev->struct_mutex);
1563
1564 fence_index = find_fence(args->lock_id);
1565 if (fence_index == ENTRY_EMPTY) {
1566 DRM_ERROR("Invalid lock ID: %x\n", args->lock_id);
1567 result = -EFAULT;
1568 goto out_unlock;
1569 }
1570
1571 fence = &gem_buf_fence[fence_index];
1572 fence->ts_device = ts_device;
1573
1574 if (!ts_done)
1575 fence->ts_valid = 1;
1576 else
1577 cleanup_fence(fence, 1);
1578
1579
1580out_unlock:
1581 mutex_unlock(&dev->struct_mutex);
1582
1583error:
1584 return result;
1585}
1586
1587struct drm_ioctl_desc kgsl_drm_ioctls[] = {
1588 DRM_IOCTL_DEF_DRV(KGSL_GEM_CREATE, kgsl_gem_create_ioctl, 0),
1589 DRM_IOCTL_DEF_DRV(KGSL_GEM_PREP, kgsl_gem_prep_ioctl, 0),
1590 DRM_IOCTL_DEF_DRV(KGSL_GEM_SETMEMTYPE, kgsl_gem_setmemtype_ioctl, 0),
1591 DRM_IOCTL_DEF_DRV(KGSL_GEM_GETMEMTYPE, kgsl_gem_getmemtype_ioctl, 0),
1592 DRM_IOCTL_DEF_DRV(KGSL_GEM_BIND_GPU, kgsl_gem_bind_gpu_ioctl, 0),
1593 DRM_IOCTL_DEF_DRV(KGSL_GEM_UNBIND_GPU, kgsl_gem_unbind_gpu_ioctl, 0),
1594 DRM_IOCTL_DEF_DRV(KGSL_GEM_ALLOC, kgsl_gem_alloc_ioctl, 0),
1595 DRM_IOCTL_DEF_DRV(KGSL_GEM_MMAP, kgsl_gem_mmap_ioctl, 0),
1596 DRM_IOCTL_DEF_DRV(KGSL_GEM_GET_BUFINFO, kgsl_gem_get_bufinfo_ioctl, 0),
1597 DRM_IOCTL_DEF_DRV(KGSL_GEM_SET_BUFCOUNT,
1598 kgsl_gem_set_bufcount_ioctl, 0),
1599 DRM_IOCTL_DEF_DRV(KGSL_GEM_SET_ACTIVE, kgsl_gem_set_active_ioctl, 0),
1600 DRM_IOCTL_DEF_DRV(KGSL_GEM_LOCK_HANDLE,
1601 kgsl_gem_lock_handle_ioctl, 0),
1602 DRM_IOCTL_DEF_DRV(KGSL_GEM_UNLOCK_HANDLE,
1603 kgsl_gem_unlock_handle_ioctl, 0),
1604 DRM_IOCTL_DEF_DRV(KGSL_GEM_UNLOCK_ON_TS,
1605 kgsl_gem_unlock_on_ts_ioctl, 0),
1606 DRM_IOCTL_DEF_DRV(KGSL_GEM_CREATE_FD, kgsl_gem_create_fd_ioctl,
1607 DRM_MASTER),
1608};
1609
1610static struct drm_driver driver = {
1611 .driver_features = DRIVER_USE_PLATFORM_DEVICE | DRIVER_GEM,
1612 .load = kgsl_drm_load,
1613 .unload = kgsl_drm_unload,
1614 .firstopen = kgsl_drm_firstopen,
1615 .lastclose = kgsl_drm_lastclose,
1616 .preclose = kgsl_drm_preclose,
1617 .suspend = kgsl_drm_suspend,
1618 .resume = kgsl_drm_resume,
1619 .reclaim_buffers = drm_core_reclaim_buffers,
1620 .gem_init_object = kgsl_gem_init_object,
1621 .gem_free_object = kgsl_gem_free_object,
1622 .ioctls = kgsl_drm_ioctls,
1623
1624 .fops = {
1625 .owner = THIS_MODULE,
1626 .open = drm_open,
1627 .release = drm_release,
1628 .unlocked_ioctl = drm_ioctl,
1629 .mmap = msm_drm_gem_mmap,
1630 .poll = drm_poll,
1631 .fasync = drm_fasync,
1632 },
1633
1634 .name = DRIVER_NAME,
1635 .desc = DRIVER_DESC,
1636 .date = DRIVER_DATE,
1637 .major = DRIVER_MAJOR,
1638 .minor = DRIVER_MINOR,
1639 .patchlevel = DRIVER_PATCHLEVEL,
1640};
1641
1642int kgsl_drm_init(struct platform_device *dev)
1643{
1644 int i;
1645
1646 driver.num_ioctls = DRM_ARRAY_SIZE(kgsl_drm_ioctls);
1647 driver.platform_device = dev;
1648
1649 INIT_LIST_HEAD(&kgsl_mem_list);
1650
1651 for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
1652 gem_buf_fence[i].num_buffers = 0;
1653 gem_buf_fence[i].ts_valid = 0;
1654 gem_buf_fence[i].fence_id = ENTRY_EMPTY;
1655 }
1656
1657 return drm_init(&driver);
1658}
1659
1660void kgsl_drm_exit(void)
1661{
1662 drm_exit(&driver);
1663}