blob: 202783b3dc73fbd254088de46133e5d3b102bcbe [file] [log] [blame]
/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13/* Implements an interface between KGSL and the DRM subsystem. For now this
14 * is pretty simple, but it will take on more of the workload as time goes
15 * on
16 */
17#include "drmP.h"
18#include "drm.h"
19#include <linux/android_pmem.h>
20#include <linux/notifier.h>
21
22#include "kgsl.h"
23#include "kgsl_device.h"
24#include "kgsl_drm.h"
25#include "kgsl_mmu.h"
26#include "kgsl_sharedmem.h"
27
/* Driver identification reported to userspace via the DRM version ioctl */
#define DRIVER_AUTHOR           "Qualcomm"
#define DRIVER_NAME             "kgsl"
#define DRIVER_DESC             "KGSL DRM"
#define DRIVER_DATE             "20100127"

#define DRIVER_MAJOR            2
#define DRIVER_MINOR            1
#define DRIVER_PATCHLEVEL       1

/* Set in drm_kgsl_gem_object.flags once the buffer is mapped in the GPU MMU */
#define DRM_KGSL_GEM_FLAG_MAPPED (1 << 0)

/* Sentinel values for fence slot state (stored in fence_id) */
#define ENTRY_EMPTY -1
#define ENTRY_NEEDS_CLEANUP -2

/* Fence table is sized as 4x the per-object wait-entry count; the macro
 * below expands at use sites, so the out-of-order definition is fine */
#define DRM_KGSL_NUM_FENCE_ENTRIES (DRM_KGSL_HANDLE_WAIT_ENTRIES << 2)
#define DRM_KGSL_HANDLE_WAIT_ENTRIES 5
44
/* Returns true if the memory type is in PMEM.
 *
 * FIX: every use of the macro parameter is now parenthesized.  The
 * originals expanded bare "_t" inside "==" and "&" expressions, which
 * mis-parses for compound arguments (e.g. "a | b") because "==" binds
 * tighter than "|" (CERT PRE01-C). */

#ifdef CONFIG_KERNEL_PMEM_SMI_REGION
#define TYPE_IS_PMEM(_t) \
  ((((_t) & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_EBI) || \
   (((_t) & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_SMI) || \
   ((_t) & DRM_KGSL_GEM_TYPE_PMEM))
#else
#define TYPE_IS_PMEM(_t) \
  ((((_t) & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_EBI) || \
   ((_t) & (DRM_KGSL_GEM_TYPE_PMEM | DRM_KGSL_GEM_PMEM_EBI)))
#endif

/* Returns true if the memory type is regular (kernel) memory */

#define TYPE_IS_MEM(_t) \
  ((((_t) & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_KMEM) || \
   (((_t) & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE) || \
   ((_t) & DRM_KGSL_GEM_TYPE_MEM))

/* Returns true if the object is backed by a file descriptor (e.g. fbmem) */
#define TYPE_IS_FD(_t) ((_t) & DRM_KGSL_GEM_TYPE_FD_MASK)

/* Returns true if KMEM region is uncached */

#define IS_MEM_UNCACHED(_t) \
  (((_t) == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE) || \
   ((_t) == DRM_KGSL_GEM_TYPE_KMEM) || \
   (TYPE_IS_MEM(_t) && ((_t) & DRM_KGSL_GEM_CACHE_WCOMBINE)))
73
/* One process waiting for a locked GEM object to be released.
 * Embedded (pre-allocated) in drm_kgsl_gem_object.wait_entries. */
struct drm_kgsl_gem_object_wait_list_entry {
	struct list_head list;		/* node on the object's wait_list */
	int pid;			/* waiting process, 0 when free */
	int in_use;			/* slot allocation marker */
	wait_queue_head_t process_wait_q; /* the waiter sleeps here */
};
80
/* A fence groups one or more GEM buffers against a GPU timestamp.
 * Lives in the global gem_buf_fence[] table. */
struct drm_kgsl_gem_object_fence {
	int32_t fence_id;	/* ENTRY_EMPTY / ENTRY_NEEDS_CLEANUP or id */
	unsigned int num_buffers; /* buffers currently in this fence */
	int ts_valid;		/* non-zero once timestamp is set */
	unsigned int timestamp;	/* GPU timestamp to retire on */
	int ts_device;		/* KGSL device the timestamp belongs to */
	int lockpid;		/* pid that created/locked the fence */
	struct list_head buffers_in_fence; /* fence_list_entry members */
};
90
/* Membership record linking one GEM object into one fence.
 * Embedded in drm_kgsl_gem_object.fence_entries. */
struct drm_kgsl_gem_object_fence_list_entry {
	struct list_head list;	/* node on fence->buffers_in_fence */
	int in_use;		/* slot allocation marker */
	struct drm_gem_object *gem_obj; /* owning GEM object */
};
96
/* Monotonically increasing id handed out to new fences (wraps at
 * 0xFFFFFFF0 back to 1; 0 and the ENTRY_* sentinels are never used) */
static int32_t fence_id = 0x1;

/* Global fence table; a slot is free when fence_id == ENTRY_EMPTY */
static struct drm_kgsl_gem_object_fence
			gem_buf_fence[DRM_KGSL_NUM_FENCE_ENTRIES];
101
/* Per-object driver state, hung off drm_gem_object->driver_private.
 * One allocation can hold several equally-sized sub-buffers (bufs[]),
 * of which one is "active" at a time. */
struct drm_kgsl_gem_object {
	struct drm_gem_object *obj;	/* back-pointer to the GEM object */
	uint32_t type;			/* DRM_KGSL_GEM_TYPE_* + cache flags */
	struct kgsl_memdesc memdesc;	/* backing memory descriptor */
	struct kgsl_pagetable *pagetable; /* GPU pagetable while mapped */
	uint64_t mmap_offset;		/* fake offset for userspace mmap */
	int bufcount;			/* number of sub-buffers */
	int flags;			/* DRM_KGSL_GEM_FLAG_MAPPED */
	struct list_head list;		/* node on kgsl_mem_list when mapped */
	int active;			/* index of the active sub-buffer */

	struct {
		uint32_t offset;	/* byte offset within the allocation */
		uint32_t gpuaddr;	/* GPU address, valid once mapped */
	} bufs[DRM_KGSL_GEM_MAX_BUFFERS];

	int bound;			/* BIND_GPU ioctl refcount */
	int lockpid;			/* pid currently holding the lock */
	/* Put these here to avoid allocing all the time */
	struct drm_kgsl_gem_object_wait_list_entry
	wait_entries[DRM_KGSL_HANDLE_WAIT_ENTRIES];
	/* Each object can only appear in a single fence */
	struct drm_kgsl_gem_object_fence_list_entry
	fence_entries[DRM_KGSL_NUM_FENCE_ENTRIES];

	struct list_head wait_list;	/* waiters (wait_list_entry nodes) */
};
129
/* This is a global list of all the memory currently mapped in the MMU;
 * kgsl_gpu_mem_flush() walks it to perform bulk cache maintenance */
static struct list_head kgsl_mem_list;
132
/* Translate a DRM-level cache operation into the matching KGSL cache
 * operation for one memory descriptor and perform it.
 * @memdesc: the memory region to operate on
 * @type: the buffer's DRM_KGSL_GEM_TYPE_* / cache-mode flags
 * @op: DRM_KGSL_GEM_CACHE_OP_TO_DEV or DRM_KGSL_GEM_CACHE_OP_FROM_DEV
 */
static void kgsl_gem_mem_flush(struct kgsl_memdesc *memdesc, int type, int op)
{
	int cacheop = 0;

	switch (op) {
	case DRM_KGSL_GEM_CACHE_OP_TO_DEV:
		/* CPU -> device: writeback caches must be cleaned so the
		 * device observes the CPU's writes */
		if (type & (DRM_KGSL_GEM_CACHE_WBACK |
			    DRM_KGSL_GEM_CACHE_WBACKWA))
			cacheop = KGSL_CACHE_OP_CLEAN;

		break;

	case DRM_KGSL_GEM_CACHE_OP_FROM_DEV:
		/* device -> CPU: any allocating cache mode must be
		 * invalidated so the CPU re-reads device data */
		if (type & (DRM_KGSL_GEM_CACHE_WBACK |
			    DRM_KGSL_GEM_CACHE_WBACKWA |
			    DRM_KGSL_GEM_CACHE_WTHROUGH))
			cacheop = KGSL_CACHE_OP_INV;
	}

	/* NOTE(review): an unmatched op reaches here with cacheop == 0;
	 * presumably kgsl_cache_range_op() treats 0 as a no-op - confirm */
	kgsl_cache_range_op(memdesc, cacheop);
}
154
155/* Flush all the memory mapped in the MMU */
156
/* Perform the cache operation @op (DRM_KGSL_GEM_CACHE_OP_*) on every
 * buffer currently on the global MMU-mapped list. */
void kgsl_gpu_mem_flush(int op)
{
	struct drm_kgsl_gem_object *entry;

	list_for_each_entry(entry, &kgsl_mem_list, list) {
		kgsl_gem_mem_flush(&entry->memdesc, entry->type, op);
	}

	/* Takes care of WT/WC case.
	 * More useful when we go barrierless
	 */
	dmb();
}
170
171/* TODO:
172 * Add vsync wait */
173
/* DRM load hook: nothing to do, KGSL devices are probed elsewhere */
static int kgsl_drm_load(struct drm_device *dev, unsigned long flags)
{
	return 0;
}
178
/* DRM unload hook: intentionally a no-op */
static int kgsl_drm_unload(struct drm_device *dev)
{
	return 0;
}
183
/* Per-DRM-file view of the KGSL devices; one slot per possible device.
 * NOTE(review): not referenced elsewhere in this chunk - verify usage. */
struct kgsl_drm_device_priv {
	struct kgsl_device *device[KGSL_DEVICE_MAX];
	struct kgsl_device_private *devpriv[KGSL_DEVICE_MAX];
};
188
/* Forward declaration: timestamp-notifier callback registered below */
static int kgsl_ts_notifier_cb(struct notifier_block *blk,
			       unsigned long code, void *_param);

/* One notifier block per possible KGSL device */
static struct notifier_block kgsl_ts_nb[KGSL_DEVICE_MAX];
193
/* DRM firstopen hook: register a timestamp notifier on every present
 * KGSL device; undone in kgsl_drm_lastclose(). */
static int kgsl_drm_firstopen(struct drm_device *dev)
{
	int i;

	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		struct kgsl_device *device = kgsl_get_device(i);

		/* Not every slot has a device - skip the holes */
		if (device == NULL)
			continue;

		kgsl_ts_nb[i].notifier_call = kgsl_ts_notifier_cb;
		kgsl_register_ts_notifier(device, &kgsl_ts_nb[i]);
	}

	return 0;
}
210
/* DRM lastclose hook: unregister the timestamp notifiers installed by
 * kgsl_drm_firstopen(). */
void kgsl_drm_lastclose(struct drm_device *dev)
{
	int i;

	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		struct kgsl_device *device = kgsl_get_device(i);
		if (device == NULL)
			continue;

		kgsl_unregister_ts_notifier(device, &kgsl_ts_nb[i]);
	}
}
223
/* DRM preclose hook: intentionally empty */
void kgsl_drm_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
}
227
/* PM suspend hook: KGSL handles its own power management, nothing to do */
static int kgsl_drm_suspend(struct drm_device *dev, pm_message_t state)
{
	return 0;
}
232
/* PM resume hook: intentionally a no-op */
static int kgsl_drm_resume(struct drm_device *dev)
{
	return 0;
}
237
/* Tear down the fake mmap offset created by kgsl_gem_create_mmap_offset():
 * remove the hash entry, release the offset-manager block, free the map
 * and clear the cached offset.  Called unconditionally from
 * kgsl_gem_free_object(), so it must tolerate a partially set-up list. */
static void
kgsl_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_kgsl_gem_object *priv = obj->driver_private;
	struct drm_map_list *list;

	list = &obj->map_list;
	drm_ht_remove_item(&mm->offset_hash, &list->hash);
	if (list->file_offset_node) {
		drm_mm_put_block(list->file_offset_node);
		list->file_offset_node = NULL;
	}

	/* kfree(NULL) is safe, so no guard is needed here */
	kfree(list->map);
	list->map = NULL;

	priv->mmap_offset = 0;
}
258
259static int
260kgsl_gem_memory_allocated(struct drm_gem_object *obj)
261{
262 struct drm_kgsl_gem_object *priv = obj->driver_private;
263 return priv->memdesc.size ? 1 : 0;
264}
265
/* Allocate the backing store for a GEM object: physically contiguous
 * PMEM or vmalloc'ed kernel memory, depending on priv->type.  The
 * allocation covers bufcount copies of the object size; the per-buffer
 * offsets are filled in at the end.
 * Returns 0 on success (including already-allocated and FD-backed
 * objects), -ENOMEM on allocation failure, -EINVAL for unknown types.
 */
static int
kgsl_gem_alloc_memory(struct drm_gem_object *obj)
{
	struct drm_kgsl_gem_object *priv = obj->driver_private;
	int index;

	/* Return if the memory is already allocated */

	if (kgsl_gem_memory_allocated(obj) || TYPE_IS_FD(priv->type))
		return 0;

	if (TYPE_IS_PMEM(priv->type)) {
		int type;

		/* Pick the PMEM pool matching the requested region */
		if (priv->type == DRM_KGSL_GEM_TYPE_EBI ||
		    priv->type & DRM_KGSL_GEM_PMEM_EBI)
			type = PMEM_MEMTYPE_EBI1;
		else
			type = PMEM_MEMTYPE_SMI;

		priv->memdesc.physaddr =
			pmem_kalloc(obj->size * priv->bufcount,
				    type | PMEM_ALIGNMENT_4K);

		/* pmem_kalloc encodes failure in the returned address */
		if (IS_ERR((void *) priv->memdesc.physaddr)) {
			DRM_ERROR("Unable to allocate PMEM memory\n");
			return -ENOMEM;
		}

		priv->memdesc.size = obj->size * priv->bufcount;
		priv->memdesc.ops = &kgsl_contiguous_ops;

	} else if (TYPE_IS_MEM(priv->type)) {
		priv->memdesc.hostptr =
			vmalloc_user(obj->size * priv->bufcount);

		if (priv->memdesc.hostptr == NULL) {
			DRM_ERROR("Unable to allocate vmalloc memory\n");
			return -ENOMEM;
		}

		priv->memdesc.size = obj->size * priv->bufcount;
		priv->memdesc.ops = &kgsl_vmalloc_ops;
	} else
		return -EINVAL;

	/* Sub-buffers are laid out back to back inside the allocation */
	for (index = 0; index < priv->bufcount; index++)
		priv->bufs[index].offset = index * obj->size;


	return 0;
}
318
319#ifdef CONFIG_MSM_KGSL_MMU
320static void
321kgsl_gem_unmap(struct drm_gem_object *obj)
322{
323 struct drm_kgsl_gem_object *priv = obj->driver_private;
324
325 if (!priv->flags & DRM_KGSL_GEM_FLAG_MAPPED)
326 return;
327
328 kgsl_mmu_unmap(priv->pagetable, &priv->memdesc);
329
330 kgsl_mmu_putpagetable(priv->pagetable);
331 priv->pagetable = NULL;
332
333 if ((priv->type == DRM_KGSL_GEM_TYPE_KMEM) ||
334 (priv->type & DRM_KGSL_GEM_CACHE_MASK))
335 list_del(&priv->list);
336
337 priv->flags &= ~DRM_KGSL_GEM_FLAG_MAPPED;
338}
339#else
/* No GPU MMU configured: nothing to unmap */
static void
kgsl_gem_unmap(struct drm_gem_object *obj)
{
}
344#endif
345
/* Release an object's backing memory: invalidate CPU caches, tear down
 * the GPU mapping, then free the PMEM or vmalloc allocation.
 * No-op for unallocated or FD-backed objects. */
static void
kgsl_gem_free_memory(struct drm_gem_object *obj)
{
	struct drm_kgsl_gem_object *priv = obj->driver_private;

	if (!kgsl_gem_memory_allocated(obj) || TYPE_IS_FD(priv->type))
		return;

	kgsl_gem_mem_flush(&priv->memdesc,  priv->type,
			   DRM_KGSL_GEM_CACHE_OP_FROM_DEV);

	kgsl_gem_unmap(obj);

	if (TYPE_IS_PMEM(priv->type))
		pmem_kfree(priv->memdesc.physaddr);

	/* NOTE(review): kgsl_sharedmem_free() also runs for PMEM objects
	 * right after pmem_kfree(); presumably the memdesc ops make this
	 * a metadata-only cleanup rather than a double free - confirm */
	kgsl_sharedmem_free(&priv->memdesc);
}
364
365int
366kgsl_gem_init_object(struct drm_gem_object *obj)
367{
368 struct drm_kgsl_gem_object *priv;
369 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
370 if (priv == NULL) {
371 DRM_ERROR("Unable to create GEM object\n");
372 return -ENOMEM;
373 }
374
375 obj->driver_private = priv;
376 priv->obj = obj;
377
378 return 0;
379}
380
/* GEM free_object hook: release backing memory, the mmap offset, the
 * core GEM state, and finally the driver-private struct. */
void
kgsl_gem_free_object(struct drm_gem_object *obj)
{
	kgsl_gem_free_memory(obj);
	kgsl_gem_free_mmap_offset(obj);
	drm_gem_object_release(obj);
	kfree(obj->driver_private);
}
389
390static int
391kgsl_gem_create_mmap_offset(struct drm_gem_object *obj)
392{
393 struct drm_device *dev = obj->dev;
394 struct drm_gem_mm *mm = dev->mm_private;
395 struct drm_kgsl_gem_object *priv = obj->driver_private;
396 struct drm_map_list *list;
397 int msize;
398
399 list = &obj->map_list;
400 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
401 if (list->map == NULL) {
402 DRM_ERROR("Unable to allocate drm_map_list\n");
403 return -ENOMEM;
404 }
405
406 msize = obj->size * priv->bufcount;
407
408 list->map->type = _DRM_GEM;
409 list->map->size = msize;
410 list->map->handle = obj;
411
412 /* Allocate a mmap offset */
413 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
414 msize / PAGE_SIZE,
415 0, 0);
416
417 if (!list->file_offset_node) {
418 DRM_ERROR("Failed to allocate offset for %d\n", obj->name);
419 kfree(list->map);
420 return -ENOMEM;
421 }
422
423 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
424 msize / PAGE_SIZE, 0);
425
426 if (!list->file_offset_node) {
427 DRM_ERROR("Unable to create the file_offset_node\n");
428 kfree(list->map);
429 return -ENOMEM;
430 }
431
432 list->hash.key = list->file_offset_node->start;
433 if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
434 DRM_ERROR("Failed to add to map hash\n");
435 drm_mm_put_block(list->file_offset_node);
436 kfree(list->map);
437 return -ENOMEM;
438 }
439
440 priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
441
442 return 0;
443}
444
445int
446kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start,
447 unsigned long *len)
448{
449 struct file *filp;
450 struct drm_device *dev;
451 struct drm_file *file_priv;
452 struct drm_gem_object *obj;
453 struct drm_kgsl_gem_object *priv;
454 int ret = 0;
455
456 filp = fget(drm_fd);
457 if (unlikely(filp == NULL)) {
458 DRM_ERROR("Unable to ghet the DRM file descriptor\n");
459 return -EINVAL;
460 }
461 file_priv = filp->private_data;
462 if (unlikely(file_priv == NULL)) {
463 DRM_ERROR("Unable to get the file private data\n");
464 fput(filp);
465 return -EINVAL;
466 }
467 dev = file_priv->minor->dev;
468 if (unlikely(dev == NULL)) {
469 DRM_ERROR("Unable to get the minor device\n");
470 fput(filp);
471 return -EINVAL;
472 }
473
474 obj = drm_gem_object_lookup(dev, file_priv, handle);
475 if (unlikely(obj == NULL)) {
476 DRM_ERROR("Invalid GEM handle %x\n", handle);
477 fput(filp);
478 return -EBADF;
479 }
480
481 mutex_lock(&dev->struct_mutex);
482 priv = obj->driver_private;
483
484 /* We can only use the MDP for PMEM regions */
485
486 if (TYPE_IS_PMEM(priv->type)) {
487 *start = priv->memdesc.physaddr +
488 priv->bufs[priv->active].offset;
489
490 *len = priv->memdesc.size;
491
492 kgsl_gem_mem_flush(&priv->memdesc,
493 priv->type, DRM_KGSL_GEM_CACHE_OP_TO_DEV);
494 } else {
495 *start = 0;
496 *len = 0;
497 ret = -EINVAL;
498 }
499
500 drm_gem_object_unreference(obj);
501 mutex_unlock(&dev->struct_mutex);
502
503 fput(filp);
504 return ret;
505}
506
/* Common initialization for freshly allocated GEM objects: reset the
 * driver-private state, create a userspace handle, and prepare the
 * embedded wait/fence slot arrays.
 * Returns the result of drm_gem_handle_create(); *handle is valid
 * only when that result is 0. */
static int
kgsl_gem_init_obj(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct drm_gem_object *obj,
		  int *handle)
{
	struct drm_kgsl_gem_object *priv;
	int ret, i;

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	memset(&priv->memdesc, 0, sizeof(priv->memdesc));
	priv->bufcount = 1;
	priv->active = 0;
	priv->bound = 0;

	/* To preserve backwards compatability, the default memory source
	   is EBI */

	priv->type = DRM_KGSL_GEM_TYPE_PMEM | DRM_KGSL_GEM_PMEM_EBI;

	ret = drm_gem_handle_create(file_priv, obj, handle);

	/* Drop the initial (allocation) reference; the handle now owns
	 * the object.  NOTE(review): this runs even when handle_create
	 * failed - verify the error path keeps the refcount balanced. */
	drm_gem_object_handle_unreference(obj);
	INIT_LIST_HEAD(&priv->wait_list);

	for (i = 0; i < DRM_KGSL_HANDLE_WAIT_ENTRIES; i++) {
		/* The cast works because 'list' is the first member */
		INIT_LIST_HEAD((struct list_head *) &priv->wait_entries[i]);
		priv->wait_entries[i].pid = 0;
		init_waitqueue_head(&priv->wait_entries[i].process_wait_q);
	}

	for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
		INIT_LIST_HEAD((struct list_head *) &priv->fence_entries[i]);
		priv->fence_entries[i].in_use = 0;
		priv->fence_entries[i].gem_obj = obj;
	}

	mutex_unlock(&dev->struct_mutex);
	return ret;
}
549
/* DRM_KGSL_GEM_CREATE ioctl: create a GEM object of the requested
 * (page-aligned) size and return a handle.  Backing memory is not
 * allocated until the ALLOC/PREP/bind paths run. */
int
kgsl_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_kgsl_gem_create *create = data;
	struct drm_gem_object *obj;
	int ret, handle;

	/* Page align the size so we can allocate multiple buffers */
	create->size = ALIGN(create->size, 4096);

	obj = drm_gem_object_alloc(dev, create->size);

	if (obj == NULL) {
		DRM_ERROR("Unable to allocate the GEM object\n");
		return -ENOMEM;
	}

	ret = kgsl_gem_init_obj(dev, file_priv, obj, &handle);
	/* NOTE(review): on init failure the object appears to be leaked
	 * here - confirm against kgsl_gem_init_obj's unreference */
	if (ret)
		return ret;

	create->handle = handle;
	return 0;
}
575
/* DRM_KGSL_GEM_CREATE_FD ioctl: wrap an existing framebuffer device
 * file descriptor in a GEM object.  The object's memory is the
 * framebuffer's own (smem_start/smem_len); nothing is allocated.
 * Returns 0 and fills args->handle on success. */
int
kgsl_gem_create_fd_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_kgsl_gem_create_fd *args = data;
	struct file *file;
	dev_t rdev;
	struct fb_info *info;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	int ret, put_needed, handle;

	file = fget_light(args->fd, &put_needed);

	if (file == NULL) {
		DRM_ERROR("Unable to get the file object\n");
		return -EBADF;
	}

	rdev = file->f_dentry->d_inode->i_rdev;

	/* Only framebuffer objects are supported ATM */

	if (MAJOR(rdev) != FB_MAJOR) {
		DRM_ERROR("File descriptor is not a framebuffer\n");
		ret = -EBADF;
		goto error_fput;
	}

	info = registered_fb[MINOR(rdev)];

	if (info == NULL) {
		DRM_ERROR("Framebuffer minor %d is not registered\n",
			  MINOR(rdev));
		ret = -EBADF;
		goto error_fput;
	}

	obj = drm_gem_object_alloc(dev, info->fix.smem_len);

	if (obj == NULL) {
		DRM_ERROR("Unable to allocate GEM object\n");
		ret = -ENOMEM;
		goto error_fput;
	}

	ret = kgsl_gem_init_obj(dev, file_priv, obj, &handle);

	if (ret)
		goto error_fput;

	mutex_lock(&dev->struct_mutex);

	/* Point the object straight at the framebuffer's memory and mark
	 * it FD-backed so the alloc/free paths leave it alone */
	priv = obj->driver_private;
	priv->memdesc.physaddr = info->fix.smem_start;
	priv->type = DRM_KGSL_GEM_TYPE_FD_FBMEM;

	mutex_unlock(&dev->struct_mutex);
	args->handle = handle;

error_fput:
	fput_light(file, put_needed);

	return ret;
}
641
642int
643kgsl_gem_setmemtype_ioctl(struct drm_device *dev, void *data,
644 struct drm_file *file_priv)
645{
646 struct drm_kgsl_gem_memtype *args = data;
647 struct drm_gem_object *obj;
648 struct drm_kgsl_gem_object *priv;
649 int ret = 0;
650
651 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
652
653 if (obj == NULL) {
654 DRM_ERROR("Invalid GEM handle %x\n", args->handle);
655 return -EBADF;
656 }
657
658 mutex_lock(&dev->struct_mutex);
659 priv = obj->driver_private;
660
661 if (TYPE_IS_FD(priv->type))
662 ret = -EINVAL;
663 else {
664 if (TYPE_IS_PMEM(args->type) || TYPE_IS_MEM(args->type))
665 priv->type = args->type;
666 else
667 ret = -EINVAL;
668 }
669
670 drm_gem_object_unreference(obj);
671 mutex_unlock(&dev->struct_mutex);
672
673 return ret;
674}
675
676int
677kgsl_gem_getmemtype_ioctl(struct drm_device *dev, void *data,
678 struct drm_file *file_priv)
679{
680 struct drm_kgsl_gem_memtype *args = data;
681 struct drm_gem_object *obj;
682 struct drm_kgsl_gem_object *priv;
683
684 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
685
686 if (obj == NULL) {
687 DRM_ERROR("Invalid GEM handle %x\n", args->handle);
688 return -EBADF;
689 }
690
691 mutex_lock(&dev->struct_mutex);
692 priv = obj->driver_private;
693
694 args->type = priv->type;
695
696 drm_gem_object_unreference(obj);
697 mutex_unlock(&dev->struct_mutex);
698
699 return 0;
700}
701
702int
703kgsl_gem_unbind_gpu_ioctl(struct drm_device *dev, void *data,
704 struct drm_file *file_priv)
705{
706 struct drm_kgsl_gem_bind_gpu *args = data;
707 struct drm_gem_object *obj;
708 struct drm_kgsl_gem_object *priv;
709
710 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
711
712 if (obj == NULL) {
713 DRM_ERROR("Invalid GEM handle %x\n", args->handle);
714 return -EBADF;
715 }
716
717 mutex_lock(&dev->struct_mutex);
718 priv = obj->driver_private;
719
720 if (--priv->bound == 0)
721 kgsl_gem_unmap(obj);
722
723 drm_gem_object_unreference(obj);
724 mutex_unlock(&dev->struct_mutex);
725 return 0;
726}
727
728#ifdef CONFIG_MSM_KGSL_MMU
729static int
730kgsl_gem_map(struct drm_gem_object *obj)
731{
732 struct drm_kgsl_gem_object *priv = obj->driver_private;
733 int index;
734 int ret = -EINVAL;
735
736 if (priv->flags & DRM_KGSL_GEM_FLAG_MAPPED)
737 return 0;
738
739 /* Get the global page table */
740
741 if (priv->pagetable == NULL) {
742 priv->pagetable = kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
743
744 if (priv->pagetable == NULL) {
745 DRM_ERROR("Unable to get the GPU MMU pagetable\n");
746 return -EINVAL;
747 }
748 }
749
750 priv->memdesc.pagetable = priv->pagetable;
751
752 ret = kgsl_mmu_map(priv->pagetable, &priv->memdesc,
753 GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
754
755 if (!ret) {
756 for (index = 0; index < priv->bufcount; index++) {
757 priv->bufs[index].gpuaddr =
758 priv->memdesc.gpuaddr +
759 priv->bufs[index].offset;
760 }
761 }
762
763 /* Add cached memory to the list to be cached */
764
765 if (priv->type == DRM_KGSL_GEM_TYPE_KMEM ||
766 priv->type & DRM_KGSL_GEM_CACHE_MASK)
767 list_add(&priv->list, &kgsl_mem_list);
768
769 priv->flags |= DRM_KGSL_GEM_FLAG_MAPPED;
770
771 return ret;
772}
773#else
774static int
775kgsl_gem_map(struct drm_gem_object *obj)
776{
777 struct drm_kgsl_gem_object *priv = obj->driver_private;
778 int index;
779
780 if (TYPE_IS_PMEM(priv->type)) {
781 for (index = 0; index < priv->bufcount; index++)
782 priv->bufs[index].gpuaddr =
783 priv->memdesc.physaddr + priv->bufs[index].offset;
784
785 return 0;
786 }
787
788 return -EINVAL;
789}
790#endif
791
792int
793kgsl_gem_bind_gpu_ioctl(struct drm_device *dev, void *data,
794 struct drm_file *file_priv)
795{
796 struct drm_kgsl_gem_bind_gpu *args = data;
797 struct drm_gem_object *obj;
798 struct drm_kgsl_gem_object *priv;
799 int ret = 0;
800
801 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
802
803 if (obj == NULL) {
804 DRM_ERROR("Invalid GEM handle %x\n", args->handle);
805 return -EBADF;
806 }
807
808 mutex_lock(&dev->struct_mutex);
809 priv = obj->driver_private;
810
811 if (priv->bound++ == 0) {
812
813 if (!kgsl_gem_memory_allocated(obj)) {
814 DRM_ERROR("Memory not allocated for this object\n");
815 ret = -ENOMEM;
816 goto out;
817 }
818
819 ret = kgsl_gem_map(obj);
820
821 /* This is legacy behavior - use GET_BUFFERINFO instead */
822 args->gpuptr = priv->bufs[0].gpuaddr;
823 }
824out:
825 drm_gem_object_unreference(obj);
826 mutex_unlock(&dev->struct_mutex);
827 return ret;
828}
829
830/* Allocate the memory and prepare it for CPU mapping */
831
/* DRM_KGSL_GEM_ALLOC ioctl: allocate the object's backing memory and
 * (lazily) create its mmap offset; the offset is returned to userspace
 * for a subsequent mmap. */
int
kgsl_gem_alloc_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_kgsl_gem_alloc *args = data;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);

	if (obj == NULL) {
		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	ret = kgsl_gem_alloc_memory(obj);

	if (ret) {
		DRM_ERROR("Unable to allocate object memory\n");
	} else if (!priv->mmap_offset) {
		/* Only create the offset once per object */
		ret = kgsl_gem_create_mmap_offset(obj);
		if (ret)
			DRM_ERROR("Unable to create a mmap offset\n");
	}

	/* On failure this reports whatever offset exists (possibly 0) */
	args->offset = priv->mmap_offset;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
868
/* DRM_KGSL_GEM_MMAP ioctl: perform the mmap on the caller's behalf
 * using the fake offset from ALLOC/PREP, returning the resulting user
 * address in args->hostptr. */
int
kgsl_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_kgsl_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);

	if (obj == NULL) {
		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
		return -EBADF;
	}

	down_write(&current->mm->mmap_sem);

	/* Routes through msm_drm_gem_mmap() via the device's fops */
	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);

	up_write(&current->mm->mmap_sem);

	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	/* On failure addr is an encoded -errno; returning it as int
	 * yields the negative error code */
	if (IS_ERR((void *) addr))
		return addr;

	args->hostptr = (uint32_t) addr;
	return 0;
}
902
903/* This function is deprecated */
904
905int
906kgsl_gem_prep_ioctl(struct drm_device *dev, void *data,
907 struct drm_file *file_priv)
908{
909 struct drm_kgsl_gem_prep *args = data;
910 struct drm_gem_object *obj;
911 struct drm_kgsl_gem_object *priv;
912 int ret;
913
914 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
915
916 if (obj == NULL) {
917 DRM_ERROR("Invalid GEM handle %x\n", args->handle);
918 return -EBADF;
919 }
920
921 mutex_lock(&dev->struct_mutex);
922 priv = obj->driver_private;
923
924 ret = kgsl_gem_alloc_memory(obj);
925 if (ret) {
926 DRM_ERROR("Unable to allocate object memory\n");
927 drm_gem_object_unreference(obj);
928 mutex_unlock(&dev->struct_mutex);
929 return ret;
930 }
931
932 if (priv->mmap_offset == 0) {
933 ret = kgsl_gem_create_mmap_offset(obj);
934 if (ret) {
935 drm_gem_object_unreference(obj);
936 mutex_unlock(&dev->struct_mutex);
937 return ret;
938 }
939 }
940
941 args->offset = priv->mmap_offset;
942 args->phys = priv->memdesc.physaddr;
943
944 drm_gem_object_unreference(obj);
945 mutex_unlock(&dev->struct_mutex);
946
947 return 0;
948}
949
/* DRM_KGSL_GEM_GET_BUFINFO ioctl: report the per-sub-buffer offsets
 * and GPU addresses, the buffer count and the active index.  Fails
 * with -EINVAL if memory has not been allocated yet. */
int
kgsl_gem_get_bufinfo_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_kgsl_gem_bufinfo *args = data;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	int ret = -EINVAL;
	int index;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);

	if (obj == NULL) {
		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	if (!kgsl_gem_memory_allocated(obj)) {
		DRM_ERROR("Memory not allocated for this object\n");
		goto out;
	}

	for (index = 0; index < priv->bufcount; index++) {
		args->offset[index] = priv->bufs[index].offset;
		args->gpuaddr[index] = priv->bufs[index].gpuaddr;
	}

	args->count = priv->bufcount;
	args->active = priv->active;

	ret = 0;

out:
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
991
992int
993kgsl_gem_set_bufcount_ioctl(struct drm_device *dev, void *data,
994 struct drm_file *file_priv)
995{
996 struct drm_kgsl_gem_bufcount *args = data;
997 struct drm_gem_object *obj;
998 struct drm_kgsl_gem_object *priv;
999 int ret = -EINVAL;
1000
1001 if (args->bufcount < 1 || args->bufcount > DRM_KGSL_GEM_MAX_BUFFERS)
1002 return -EINVAL;
1003
1004 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1005
1006 if (obj == NULL) {
1007 DRM_ERROR("Invalid GEM handle %x\n", args->handle);
1008 return -EBADF;
1009 }
1010
1011 mutex_lock(&dev->struct_mutex);
1012 priv = obj->driver_private;
1013
1014 /* It is too much math to worry about what happens if we are already
1015 allocated, so just bail if we are */
1016
1017 if (kgsl_gem_memory_allocated(obj)) {
1018 DRM_ERROR("Memory already allocated - cannot change"
1019 "number of buffers\n");
1020 goto out;
1021 }
1022
1023 priv->bufcount = args->bufcount;
1024 ret = 0;
1025
1026out:
1027 drm_gem_object_unreference(obj);
1028 mutex_unlock(&dev->struct_mutex);
1029
1030 return ret;
1031}
1032
1033int
1034kgsl_gem_set_active_ioctl(struct drm_device *dev, void *data,
1035 struct drm_file *file_priv)
1036{
1037 struct drm_kgsl_gem_active *args = data;
1038 struct drm_gem_object *obj;
1039 struct drm_kgsl_gem_object *priv;
1040 int ret = -EINVAL;
1041
1042 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
1043
1044 if (obj == NULL) {
1045 DRM_ERROR("Invalid GEM handle %x\n", args->handle);
1046 return -EBADF;
1047 }
1048
1049 mutex_lock(&dev->struct_mutex);
1050 priv = obj->driver_private;
1051
1052 if (args->active < 0 || args->active >= priv->bufcount) {
1053 DRM_ERROR("Invalid active buffer %d\n", args->active);
1054 goto out;
1055 }
1056
1057 priv->active = args->active;
1058 ret = 0;
1059
1060out:
1061 drm_gem_object_unreference(obj);
1062 mutex_unlock(&dev->struct_mutex);
1063
1064 return ret;
1065}
1066
/* Page-fault handler for vmalloc-backed (KMEM) mappings: resolve the
 * faulting address to the matching vmalloc page and hand it back to
 * the VM with an extra reference. */
int kgsl_gem_kmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct drm_kgsl_gem_object *priv;
	unsigned long offset, pg;
	struct page *page;

	mutex_lock(&dev->struct_mutex);

	priv = obj->driver_private;

	/* Byte offset of the fault within the mapping, then the matching
	 * kernel virtual address inside the vmalloc region */
	offset = (unsigned long) vmf->virtual_address - vma->vm_start;
	pg = (unsigned long) priv->memdesc.hostptr + offset;

	page = vmalloc_to_page((void *) pg);
	if (!page) {
		mutex_unlock(&dev->struct_mutex);
		return VM_FAULT_SIGBUS;
	}

	/* The VM consumes this reference when it installs the page */
	get_page(page);
	vmf->page = page;

	mutex_unlock(&dev->struct_mutex);
	return 0;
}
1094
/* Page-fault handler for physically contiguous (PMEM/fbmem) mappings:
 * insert the raw pfn for the faulting page (VM_PFNMAP vma, no struct
 * page involved). */
int kgsl_gem_phys_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct drm_kgsl_gem_object *priv;
	unsigned long offset, pfn;
	int ret = 0;

	/* Page offset of the fault within the mapping */
	offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	mutex_lock(&dev->struct_mutex);

	priv = obj->driver_private;

	pfn = (priv->memdesc.physaddr >> PAGE_SHIFT) + offset;
	ret = vm_insert_pfn(vma,
			    (unsigned long) vmf->virtual_address, pfn);
	mutex_unlock(&dev->struct_mutex);

	/* Map vm_insert_pfn's errno onto VM_FAULT_* codes; 0 and
	 * -EBUSY (already mapped by a racing fault) both mean done */
	switch (ret) {
	case -ENOMEM:
	case -EAGAIN:
		return VM_FAULT_OOM;
	case -EFAULT:
		return VM_FAULT_SIGBUS;
	default:
		return VM_FAULT_NOPAGE;
	}
}
1125
/* vm_ops for vmalloc-backed mappings (struct-page based faults) */
static struct vm_operations_struct kgsl_gem_kmem_vm_ops = {
	.fault = kgsl_gem_kmem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
1131
/* vm_ops for physically contiguous mappings (pfn-based faults) */
static struct vm_operations_struct kgsl_gem_phys_vm_ops = {
	.fault = kgsl_gem_phys_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
1137
1138/* This is a clone of the standard drm_gem_mmap function modified to allow
1139 us to properly map KMEM regions as well as the PMEM regions */
1140
/* This is a clone of the standard drm_gem_mmap function modified to allow
   us to properly map KMEM regions as well as the PMEM regions.
   Looks up the GEM object by its fake offset, picks fault handlers and
   vma flags appropriate for the backing store, applies the requested
   cache policy to the mapping, and takes a reference for the vma. */

int msm_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	struct drm_kgsl_gem_object *gpriv;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	/* Not a GEM fake offset - fall back to the legacy map path */
	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		ret =  -EPERM;
		goto out_unlock;
	}

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start) {
		ret = -EINVAL;
		goto out_unlock;
	}

	obj = map->handle;

	gpriv = obj->driver_private;

	/* VM_PFNMAP is only for memory that doesn't use struct page
	 * in other words, not "normal" memory.  If you try to use it
	 * with "normal" memory then the mappings don't get flushed. */

	if (TYPE_IS_MEM(gpriv->type)) {
		vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
		vma->vm_ops = &kgsl_gem_kmem_vm_ops;
	} else {
		vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP |
			VM_DONTEXPAND;
		vma->vm_ops = &kgsl_gem_phys_vm_ops;
	}

	vma->vm_private_data = map->handle;


	/* Take care of requested caching policy */
	if (gpriv->type == DRM_KGSL_GEM_TYPE_KMEM ||
	    gpriv->type & DRM_KGSL_GEM_CACHE_MASK) {
		if (gpriv->type & DRM_KGSL_GEM_CACHE_WBACKWA)
			vma->vm_page_prot =
			pgprot_writebackwacache(vma->vm_page_prot);
		else if (gpriv->type & DRM_KGSL_GEM_CACHE_WBACK)
				vma->vm_page_prot =
				pgprot_writebackcache(vma->vm_page_prot);
		else if (gpriv->type & DRM_KGSL_GEM_CACHE_WTHROUGH)
				vma->vm_page_prot =
				pgprot_writethroughcache(vma->vm_page_prot);
		else
			vma->vm_page_prot =
			pgprot_writecombine(vma->vm_page_prot);
	} else {
		if (gpriv->type == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE)
			vma->vm_page_prot =
			pgprot_noncached(vma->vm_page_prot);
		else
			/* default pmem is WC */
			vma->vm_page_prot =
			pgprot_writecombine(vma->vm_page_prot);
	}

	/* flush out existing KMEM cached mappings if new ones are
	 * of uncached type */
	if (IS_MEM_UNCACHED(gpriv->type))
		kgsl_cache_range_op(&gpriv->memdesc,
				    KGSL_CACHE_OP_FLUSH);

	/* Add the other memory types here */

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
1241
void
cleanup_fence(struct drm_kgsl_gem_object_fence *fence, int check_waiting)
{
	int j;
	struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
	struct drm_kgsl_gem_object *unlock_obj;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object_wait_list_entry *lock_next;

	/* Invalidate the fence's timestamp state so the notifier callback
	 * (kgsl_ts_notifier_cb) will skip this entry from now on. */
	fence->ts_valid = 0;
	fence->timestamp = -1;
	fence->ts_device = -1;

	/* Walk the list of buffers in this fence and clean up the */
	/* references. Note that this can cause memory allocations */
	/* to be freed */
	for (j = fence->num_buffers; j > 0; j--) {
		/* NOTE(review): raw cast of the tail list_head to the entry
		 * struct — assumes the 'list' member is at offset 0 of
		 * drm_kgsl_gem_object_fence_list_entry; list_entry() would
		 * make that explicit. Confirm against the struct layout. */
		this_fence_entry =
				(struct drm_kgsl_gem_object_fence_list_entry *)
				fence->buffers_in_fence.prev;

		/* Return the per-object fence slot to the free pool */
		this_fence_entry->in_use = 0;
		obj = this_fence_entry->gem_obj;
		unlock_obj = obj->driver_private;

		/* Delete it from the list */

		list_del(&this_fence_entry->list);

		/* we are unlocking - see if there are other pids waiting */
		if (check_waiting) {
			if (!list_empty(&unlock_obj->wait_list)) {
				/* Hand the lock to the oldest waiter (tail of
				 * the wait list) and wake it up. */
				lock_next =
				(struct drm_kgsl_gem_object_wait_list_entry *)
					unlock_obj->wait_list.prev;

				list_del((struct list_head *)&lock_next->list);

				unlock_obj->lockpid = 0;
				wake_up_interruptible(
					&lock_next->process_wait_q);
				lock_next->pid = 0;

			} else {
				/* List is empty so set pid to 0 */
				unlock_obj->lockpid = 0;
			}
		}

		/* Drop the reference taken when the buffer was added to the
		 * fence in kgsl_gem_lock_handle_ioctl; may free the object. */
		drm_gem_object_unreference(obj);
	}
	/* here all the buffers in the fence are released */
	/* clear the fence entry */
	fence->fence_id = ENTRY_EMPTY;
}
1297
1298int
1299find_empty_fence(void)
1300{
1301 int i;
1302
1303 for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
1304 if (gem_buf_fence[i].fence_id == ENTRY_EMPTY) {
1305 gem_buf_fence[i].fence_id = fence_id++;
1306 gem_buf_fence[i].ts_valid = 0;
1307 INIT_LIST_HEAD(&(gem_buf_fence[i].buffers_in_fence));
1308 if (fence_id == 0xFFFFFFF0)
1309 fence_id = 1;
1310 return i;
1311 } else {
1312
1313 /* Look for entries to be cleaned up */
1314 if (gem_buf_fence[i].fence_id == ENTRY_NEEDS_CLEANUP)
1315 cleanup_fence(&gem_buf_fence[i], 0);
1316 }
1317 }
1318
1319 return ENTRY_EMPTY;
1320}
1321
1322int
1323find_fence(int index)
1324{
1325 int i;
1326
1327 for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
1328 if (gem_buf_fence[i].fence_id == index)
1329 return i;
1330 }
1331
1332 return ENTRY_EMPTY;
1333}
1334
/* Called when the fence's timestamp has expired: wake the next waiter (if
 * any) on every buffer in the fence and mark the fence for deferred
 * cleanup. The actual reference drops happen later in cleanup_fence(). */
void
wakeup_fence_entries(struct drm_kgsl_gem_object_fence *fence)
{
	struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
	struct drm_kgsl_gem_object_wait_list_entry *lock_next;
	struct drm_kgsl_gem_object *unlock_obj;
	struct drm_gem_object *obj;

	/* TS has expired when we get here */
	fence->ts_valid = 0;
	fence->timestamp = -1;
	fence->ts_device = -1;

	list_for_each_entry(this_fence_entry, &fence->buffers_in_fence, list) {
		obj = this_fence_entry->gem_obj;
		unlock_obj = obj->driver_private;

		if (!list_empty(&unlock_obj->wait_list)) {
			/* NOTE(review): raw cast of the tail list_head —
			 * assumes 'list' is the first member of the wait
			 * entry struct; list_entry() would be safer. */
			lock_next =
				(struct drm_kgsl_gem_object_wait_list_entry *)
					unlock_obj->wait_list.prev;

			/* Unblock the pid */
			lock_next->pid = 0;

			/* Delete it from the list */
			list_del((struct list_head *)&lock_next->list);

			unlock_obj->lockpid = 0;
			wake_up_interruptible(&lock_next->process_wait_q);

		} else {
			/* List is empty so set pid to 0 */
			unlock_obj->lockpid = 0;
		}
	}
	fence->fence_id = ENTRY_NEEDS_CLEANUP;  /* Mark it as needing cleanup */
}
1373
1374static int kgsl_ts_notifier_cb(struct notifier_block *blk,
1375 unsigned long code, void *_param)
1376{
1377 struct drm_kgsl_gem_object_fence *fence;
1378 struct kgsl_device *device = kgsl_get_device(code);
1379 int i;
1380
1381 /* loop through the fences to see what things can be processed */
1382
1383 for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
1384 fence = &gem_buf_fence[i];
1385 if (!fence->ts_valid || fence->ts_device != code)
1386 continue;
1387
1388 if (kgsl_check_timestamp(device, fence->timestamp))
1389 wakeup_fence_entries(fence);
1390 }
1391
1392 return 0;
1393}
1394
1395int
1396kgsl_gem_lock_handle_ioctl(struct drm_device *dev, void *data,
1397 struct drm_file *file_priv)
1398{
1399 /* The purpose of this function is to lock a given set of handles. */
1400 /* The driver will maintain a list of locked handles. */
1401 /* If a request comes in for a handle that's locked the thread will */
1402 /* block until it's no longer in use. */
1403
1404 struct drm_kgsl_gem_lock_handles *args = data;
1405 struct drm_gem_object *obj;
1406 struct drm_kgsl_gem_object *priv;
1407 struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
1408 struct drm_kgsl_gem_object_fence *fence;
1409 struct drm_kgsl_gem_object_wait_list_entry *lock_item;
1410 int i, j;
1411 int result = 0;
1412 uint32_t *lock_list;
1413 uint32_t *work_list = NULL;
1414 int32_t fence_index;
1415
1416 /* copy in the data from user space */
1417 lock_list = kzalloc(sizeof(uint32_t) * args->num_handles, GFP_KERNEL);
1418 if (!lock_list) {
1419 DRM_ERROR("Unable allocate memory for lock list\n");
1420 result = -ENOMEM;
1421 goto error;
1422 }
1423
1424 if (copy_from_user(lock_list, args->handle_list,
1425 sizeof(uint32_t) * args->num_handles)) {
1426 DRM_ERROR("Unable to copy the lock list from the user\n");
1427 result = -EFAULT;
1428 goto free_handle_list;
1429 }
1430
1431
1432 work_list = lock_list;
1433 mutex_lock(&dev->struct_mutex);
1434
1435 /* build the fence for this group of handles */
1436 fence_index = find_empty_fence();
1437 if (fence_index == ENTRY_EMPTY) {
1438 DRM_ERROR("Unable to find a empty fence\n");
1439 args->lock_id = 0xDEADBEEF;
1440 result = -EFAULT;
1441 goto out_unlock;
1442 }
1443
1444 fence = &gem_buf_fence[fence_index];
1445 gem_buf_fence[fence_index].num_buffers = args->num_handles;
1446 args->lock_id = gem_buf_fence[fence_index].fence_id;
1447
1448 for (j = args->num_handles; j > 0; j--, lock_list++) {
1449 obj = drm_gem_object_lookup(dev, file_priv, *lock_list);
1450
1451 if (obj == NULL) {
1452 DRM_ERROR("Invalid GEM handle %x\n", *lock_list);
1453 result = -EBADF;
1454 goto out_unlock;
1455 }
1456
1457 priv = obj->driver_private;
1458 this_fence_entry = NULL;
1459
1460 /* get a fence entry to hook into the fence */
1461 for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
1462 if (!priv->fence_entries[i].in_use) {
1463 this_fence_entry = &priv->fence_entries[i];
1464 this_fence_entry->in_use = 1;
1465 break;
1466 }
1467 }
1468
1469 if (this_fence_entry == NULL) {
1470 fence->num_buffers = 0;
1471 fence->fence_id = ENTRY_EMPTY;
1472 args->lock_id = 0xDEADBEAD;
1473 result = -EFAULT;
1474 drm_gem_object_unreference(obj);
1475 goto out_unlock;
1476 }
1477
1478 /* We're trying to lock - add to a fence */
1479 list_add((struct list_head *)this_fence_entry,
1480 &gem_buf_fence[fence_index].buffers_in_fence);
1481 if (priv->lockpid) {
1482
1483 if (priv->lockpid == args->pid) {
1484 /* now that things are running async this */
1485 /* happens when an op isn't done */
1486 /* so it's already locked by the calling pid */
1487 continue;
1488 }
1489
1490
1491 /* if a pid already had it locked */
1492 /* create and add to wait list */
1493 for (i = 0; i < DRM_KGSL_HANDLE_WAIT_ENTRIES; i++) {
1494 if (priv->wait_entries[i].in_use == 0) {
1495 /* this one is empty */
1496 lock_item = &priv->wait_entries[i];
1497 lock_item->in_use = 1;
1498 lock_item->pid = args->pid;
1499 INIT_LIST_HEAD((struct list_head *)
1500 &priv->wait_entries[i]);
1501 break;
1502 }
1503 }
1504
1505 if (i == DRM_KGSL_HANDLE_WAIT_ENTRIES) {
1506
1507 result = -EFAULT;
1508 drm_gem_object_unreference(obj);
1509 goto out_unlock;
1510 }
1511
1512 list_add_tail((struct list_head *)&lock_item->list,
1513 &priv->wait_list);
1514 mutex_unlock(&dev->struct_mutex);
1515 /* here we need to block */
1516 wait_event_interruptible_timeout(
1517 priv->wait_entries[i].process_wait_q,
1518 (priv->lockpid == 0),
1519 msecs_to_jiffies(64));
1520 mutex_lock(&dev->struct_mutex);
1521 lock_item->in_use = 0;
1522 }
1523
1524 /* Getting here means no one currently holds the lock */
1525 priv->lockpid = args->pid;
1526
1527 args->lock_id = gem_buf_fence[fence_index].fence_id;
1528 }
1529 fence->lockpid = args->pid;
1530
1531out_unlock:
1532 mutex_unlock(&dev->struct_mutex);
1533
1534free_handle_list:
1535 kfree(work_list);
1536
1537error:
1538 return result;
1539}
1540
1541int
1542kgsl_gem_unlock_handle_ioctl(struct drm_device *dev, void *data,
1543 struct drm_file *file_priv)
1544{
1545 struct drm_kgsl_gem_unlock_handles *args = data;
1546 int result = 0;
1547 int32_t fence_index;
1548
1549 mutex_lock(&dev->struct_mutex);
1550 fence_index = find_fence(args->lock_id);
1551 if (fence_index == ENTRY_EMPTY) {
1552 DRM_ERROR("Invalid lock ID: %x\n", args->lock_id);
1553 result = -EFAULT;
1554 goto out_unlock;
1555 }
1556
1557 cleanup_fence(&gem_buf_fence[fence_index], 1);
1558
1559out_unlock:
1560 mutex_unlock(&dev->struct_mutex);
1561
1562 return result;
1563}
1564
1565
1566int
1567kgsl_gem_unlock_on_ts_ioctl(struct drm_device *dev, void *data,
1568 struct drm_file *file_priv)
1569{
1570 struct drm_kgsl_gem_unlock_on_ts *args = data;
1571 int result = 0;
1572 int ts_done = 0;
1573 int32_t fence_index, ts_device;
1574 struct drm_kgsl_gem_object_fence *fence;
1575 struct kgsl_device *device;
1576
1577 if (args->type == DRM_KGSL_GEM_TS_3D)
1578 ts_device = KGSL_DEVICE_3D0;
1579 else if (args->type == DRM_KGSL_GEM_TS_2D)
1580 ts_device = KGSL_DEVICE_2D0;
1581 else {
1582 result = -EINVAL;
1583 goto error;
1584 }
1585
1586 device = kgsl_get_device(ts_device);
1587 ts_done = kgsl_check_timestamp(device, args->timestamp);
1588
1589 mutex_lock(&dev->struct_mutex);
1590
1591 fence_index = find_fence(args->lock_id);
1592 if (fence_index == ENTRY_EMPTY) {
1593 DRM_ERROR("Invalid lock ID: %x\n", args->lock_id);
1594 result = -EFAULT;
1595 goto out_unlock;
1596 }
1597
1598 fence = &gem_buf_fence[fence_index];
1599 fence->ts_device = ts_device;
1600
1601 if (!ts_done)
1602 fence->ts_valid = 1;
1603 else
1604 cleanup_fence(fence, 1);
1605
1606
1607out_unlock:
1608 mutex_unlock(&dev->struct_mutex);
1609
1610error:
1611 return result;
1612}
1613
/* Dispatch table for the KGSL DRM ioctls; num_ioctls is filled in by
 * kgsl_drm_init(). Only CREATE_FD is restricted (DRM_MASTER); the rest
 * are callable by any DRM client. */
struct drm_ioctl_desc kgsl_drm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(KGSL_GEM_CREATE, kgsl_gem_create_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_PREP, kgsl_gem_prep_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_SETMEMTYPE, kgsl_gem_setmemtype_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_GETMEMTYPE, kgsl_gem_getmemtype_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_BIND_GPU, kgsl_gem_bind_gpu_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_UNBIND_GPU, kgsl_gem_unbind_gpu_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_ALLOC, kgsl_gem_alloc_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_MMAP, kgsl_gem_mmap_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_GET_BUFINFO, kgsl_gem_get_bufinfo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_SET_BUFCOUNT,
			  kgsl_gem_set_bufcount_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_SET_ACTIVE, kgsl_gem_set_active_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_LOCK_HANDLE,
				  kgsl_gem_lock_handle_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_UNLOCK_HANDLE,
				  kgsl_gem_unlock_handle_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_UNLOCK_ON_TS,
				  kgsl_gem_unlock_on_ts_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_CREATE_FD, kgsl_gem_create_fd_ioctl,
		      DRM_MASTER),
};
1636
/* DRM driver descriptor: a GEM-capable platform-device driver. The fops
 * mmap hook is msm_drm_gem_mmap rather than the DRM core default;
 * num_ioctls and platform_device are filled in by kgsl_drm_init(). */
static struct drm_driver driver = {
	.driver_features = DRIVER_USE_PLATFORM_DEVICE | DRIVER_GEM,
	.load = kgsl_drm_load,
	.unload = kgsl_drm_unload,
	.firstopen = kgsl_drm_firstopen,
	.lastclose = kgsl_drm_lastclose,
	.preclose = kgsl_drm_preclose,
	.suspend = kgsl_drm_suspend,
	.resume = kgsl_drm_resume,
	.reclaim_buffers = drm_core_reclaim_buffers,
	.gem_init_object = kgsl_gem_init_object,
	.gem_free_object = kgsl_gem_free_object,
	.ioctls = kgsl_drm_ioctls,

	.fops = {
		 .owner = THIS_MODULE,
		 .open = drm_open,
		 .release = drm_release,
		 .unlocked_ioctl = drm_ioctl,
		 .mmap = msm_drm_gem_mmap,
		 .poll = drm_poll,
		 .fasync = drm_fasync,
		 },

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
1668
1669int kgsl_drm_init(struct platform_device *dev)
1670{
1671 int i;
1672
1673 driver.num_ioctls = DRM_ARRAY_SIZE(kgsl_drm_ioctls);
1674 driver.platform_device = dev;
1675
1676 INIT_LIST_HEAD(&kgsl_mem_list);
1677
1678 for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
1679 gem_buf_fence[i].num_buffers = 0;
1680 gem_buf_fence[i].ts_valid = 0;
1681 gem_buf_fence[i].fence_id = ENTRY_EMPTY;
1682 }
1683
1684 return drm_init(&driver);
1685}
1686
/* Unregister the driver from the DRM core; counterpart of kgsl_drm_init(). */
void kgsl_drm_exit(void)
{
	drm_exit(&driver);
}