blob: e3f6f3b219b8a9643cdbf300ddc6904a48a385d4 [file] [log] [blame]
Michael Street8bacdd02012-01-05 14:55:01 -08001/* Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13/* Implements an interface between KGSL and the DRM subsystem. For now this
14 * is pretty simple, but it will take on more of the workload as time goes
15 * on
16 */
17#include "drmP.h"
18#include "drm.h"
19#include <linux/android_pmem.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070020
21#include "kgsl.h"
22#include "kgsl_device.h"
23#include "kgsl_drm.h"
24#include "kgsl_mmu.h"
25#include "kgsl_sharedmem.h"
26
27#define DRIVER_AUTHOR "Qualcomm"
28#define DRIVER_NAME "kgsl"
29#define DRIVER_DESC "KGSL DRM"
30#define DRIVER_DATE "20100127"
31
32#define DRIVER_MAJOR 2
33#define DRIVER_MINOR 1
34#define DRIVER_PATCHLEVEL 1
35
36#define DRM_KGSL_GEM_FLAG_MAPPED (1 << 0)
37
38#define ENTRY_EMPTY -1
39#define ENTRY_NEEDS_CLEANUP -2
40
Michael Street8bacdd02012-01-05 14:55:01 -080041#define DRM_KGSL_NOT_INITED -1
42#define DRM_KGSL_INITED 1
43
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070044#define DRM_KGSL_NUM_FENCE_ENTRIES (DRM_KGSL_HANDLE_WAIT_ENTRIES << 2)
45#define DRM_KGSL_HANDLE_WAIT_ENTRIES 5
46
47/* Returns true if the memory type is in PMEM */
48
49#ifdef CONFIG_KERNEL_PMEM_SMI_REGION
50#define TYPE_IS_PMEM(_t) \
51 (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_EBI) || \
52 ((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_SMI) || \
53 ((_t) & DRM_KGSL_GEM_TYPE_PMEM))
54#else
55#define TYPE_IS_PMEM(_t) \
56 (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_EBI) || \
57 ((_t) & (DRM_KGSL_GEM_TYPE_PMEM | DRM_KGSL_GEM_PMEM_EBI)))
58#endif
59
60/* Returns true if the memory type is regular */
61
62#define TYPE_IS_MEM(_t) \
63 (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_KMEM) || \
64 ((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE) || \
65 ((_t) & DRM_KGSL_GEM_TYPE_MEM))
66
67#define TYPE_IS_FD(_t) ((_t) & DRM_KGSL_GEM_TYPE_FD_MASK)
68
69/* Returns true if KMEM region is uncached */
70
71#define IS_MEM_UNCACHED(_t) \
72 ((_t == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE) || \
73 (_t == DRM_KGSL_GEM_TYPE_KMEM) || \
74 (TYPE_IS_MEM(_t) && (_t & DRM_KGSL_GEM_CACHE_WCOMBINE)))
75
76struct drm_kgsl_gem_object_wait_list_entry {
77 struct list_head list;
78 int pid;
79 int in_use;
80 wait_queue_head_t process_wait_q;
81};
82
83struct drm_kgsl_gem_object_fence {
84 int32_t fence_id;
85 unsigned int num_buffers;
86 int ts_valid;
87 unsigned int timestamp;
88 int ts_device;
89 int lockpid;
90 struct list_head buffers_in_fence;
91};
92
93struct drm_kgsl_gem_object_fence_list_entry {
94 struct list_head list;
95 int in_use;
96 struct drm_gem_object *gem_obj;
97};
98
99static int32_t fence_id = 0x1;
100
101static struct drm_kgsl_gem_object_fence
102 gem_buf_fence[DRM_KGSL_NUM_FENCE_ENTRIES];
103
104struct drm_kgsl_gem_object {
105 struct drm_gem_object *obj;
106 uint32_t type;
107 struct kgsl_memdesc memdesc;
108 struct kgsl_pagetable *pagetable;
109 uint64_t mmap_offset;
110 int bufcount;
111 int flags;
112 struct list_head list;
113 int active;
114
115 struct {
116 uint32_t offset;
117 uint32_t gpuaddr;
118 } bufs[DRM_KGSL_GEM_MAX_BUFFERS];
119
120 int bound;
121 int lockpid;
122 /* Put these here to avoid allocing all the time */
123 struct drm_kgsl_gem_object_wait_list_entry
124 wait_entries[DRM_KGSL_HANDLE_WAIT_ENTRIES];
125 /* Each object can only appear in a single fence */
126 struct drm_kgsl_gem_object_fence_list_entry
127 fence_entries[DRM_KGSL_NUM_FENCE_ENTRIES];
128
129 struct list_head wait_list;
130};
131
Michael Street8bacdd02012-01-05 14:55:01 -0800132static int kgsl_drm_inited = DRM_KGSL_NOT_INITED;
133
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700134/* This is a global list of all the memory currently mapped in the MMU */
135static struct list_head kgsl_mem_list;
136
/*
 * Select and apply the CPU cache maintenance operation that matches the
 * buffer's cache type and the direction of the upcoming DMA transfer.
 */
static void kgsl_gem_mem_flush(struct kgsl_memdesc *memdesc, int type, int op)
{
        int cacheop = 0;

        switch (op) {
        case DRM_KGSL_GEM_CACHE_OP_TO_DEV:
                /* CPU -> GPU: dirty write-back lines must be cleaned */
                if (type & (DRM_KGSL_GEM_CACHE_WBACK |
                            DRM_KGSL_GEM_CACHE_WBACKWA))
                        cacheop = KGSL_CACHE_OP_CLEAN;

                break;

        case DRM_KGSL_GEM_CACHE_OP_FROM_DEV:
                /* GPU -> CPU: any cached lines are stale and must be
                 * invalidated (write-through included) */
                if (type & (DRM_KGSL_GEM_CACHE_WBACK |
                            DRM_KGSL_GEM_CACHE_WBACKWA |
                            DRM_KGSL_GEM_CACHE_WTHROUGH))
                        cacheop = KGSL_CACHE_OP_INV;
        }

        /* cacheop stays 0 for uncached/write-combine buffers; presumably
         * kgsl_cache_range_op() treats 0 as a no-op -- TODO confirm */
        kgsl_cache_range_op(memdesc, cacheop);
}
158
159/* Flush all the memory mapped in the MMU */
160
/*
 * Apply a cache operation (DRM_KGSL_GEM_CACHE_OP_*) to every buffer on
 * the global list of MMU-mapped memory, then issue a barrier.
 */
void kgsl_gpu_mem_flush(int op)
{
        struct drm_kgsl_gem_object *entry;

        /* NOTE(review): no lock is taken around kgsl_mem_list here --
         * verify callers serialize against alloc/free paths */
        list_for_each_entry(entry, &kgsl_mem_list, list) {
                kgsl_gem_mem_flush(&entry->memdesc, entry->type, op);
        }

        /* Takes care of WT/WC case.
         * More useful when we go barrierless
         */
        dmb();
}
174
175/* TODO:
176 * Add vsync wait */
177
/* DRM driver .load hook; no per-device setup is required, always succeeds. */
static int kgsl_drm_load(struct drm_device *dev, unsigned long flags)
{
        return 0;
}
182
/* DRM driver .unload hook; nothing to tear down, always succeeds. */
static int kgsl_drm_unload(struct drm_device *dev)
{
        return 0;
}
187
/* Per-DRM-device private data: a KGSL device handle and its per-file
 * private data, one slot for each possible KGSL device. */
struct kgsl_drm_device_priv {
        struct kgsl_device *device[KGSL_DEVICE_MAX];
        struct kgsl_device_private *devpriv[KGSL_DEVICE_MAX];
};
192
/* DRM .preclose hook; intentionally empty -- no per-file state to clean. */
void kgsl_drm_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
}
196
/* DRM suspend hook; power management is handled elsewhere, always succeeds. */
static int kgsl_drm_suspend(struct drm_device *dev, pm_message_t state)
{
        return 0;
}
201
/* DRM resume hook; intentionally a no-op, always succeeds. */
static int kgsl_drm_resume(struct drm_device *dev)
{
        return 0;
}
206
/*
 * Tear down the fake mmap offset of a GEM object: remove it from the
 * offset hash, return the offset-manager block, and free the map node.
 * Safe to call if no offset was ever created (kfree(NULL) is a no-op).
 */
static void
kgsl_gem_free_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_gem_mm *mm = dev->mm_private;
        struct drm_kgsl_gem_object *priv = obj->driver_private;
        struct drm_map_list *list;

        list = &obj->map_list;
        drm_ht_remove_item(&mm->offset_hash, &list->hash);
        if (list->file_offset_node) {
                drm_mm_put_block(list->file_offset_node);
                list->file_offset_node = NULL;
        }

        kfree(list->map);
        list->map = NULL;

        /* Offset 0 doubles as "no offset allocated" elsewhere in the file */
        priv->mmap_offset = 0;
}
227
228static int
229kgsl_gem_memory_allocated(struct drm_gem_object *obj)
230{
231 struct drm_kgsl_gem_object *priv = obj->driver_private;
232 return priv->memdesc.size ? 1 : 0;
233}
234
235static int
236kgsl_gem_alloc_memory(struct drm_gem_object *obj)
237{
238 struct drm_kgsl_gem_object *priv = obj->driver_private;
239 int index;
Michael Street8bacdd02012-01-05 14:55:01 -0800240 int result = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700241
242 /* Return if the memory is already allocated */
243
244 if (kgsl_gem_memory_allocated(obj) || TYPE_IS_FD(priv->type))
245 return 0;
246
Michael Street8bacdd02012-01-05 14:55:01 -0800247 if (priv->pagetable == NULL) {
248 priv->pagetable = kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
249
250 if (priv->pagetable == NULL) {
251 DRM_ERROR("Unable to get the GPU MMU pagetable\n");
252 return -EINVAL;
253 }
254 }
255
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700256 if (TYPE_IS_PMEM(priv->type)) {
257 int type;
258
259 if (priv->type == DRM_KGSL_GEM_TYPE_EBI ||
Michael Street8bacdd02012-01-05 14:55:01 -0800260 priv->type & DRM_KGSL_GEM_PMEM_EBI) {
261 type = PMEM_MEMTYPE_EBI1;
262 result = kgsl_sharedmem_ebimem_user(
263 &priv->memdesc,
264 priv->pagetable,
265 obj->size * priv->bufcount,
266 0);
267 if (result) {
268 DRM_ERROR(
269 "Unable to allocate PMEM memory\n");
270 return result;
271 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700272 }
Michael Street8bacdd02012-01-05 14:55:01 -0800273 else
274 return -EINVAL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700275
276 } else if (TYPE_IS_MEM(priv->type)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700277
Michael Street8bacdd02012-01-05 14:55:01 -0800278 if (priv->type == DRM_KGSL_GEM_TYPE_KMEM ||
279 priv->type & DRM_KGSL_GEM_CACHE_MASK)
280 list_add(&priv->list, &kgsl_mem_list);
281
282 result = kgsl_sharedmem_vmalloc_user(&priv->memdesc,
283 priv->pagetable,
284 obj->size * priv->bufcount, 0);
285
286 if (result != 0) {
287 DRM_ERROR(
288 "Unable to allocate Vmalloc user memory\n");
289 return result;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700290 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700291 } else
292 return -EINVAL;
293
Michael Street8bacdd02012-01-05 14:55:01 -0800294 for (index = 0; index < priv->bufcount; index++) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700295 priv->bufs[index].offset = index * obj->size;
Michael Street8bacdd02012-01-05 14:55:01 -0800296 priv->bufs[index].gpuaddr =
297 priv->memdesc.gpuaddr +
298 priv->bufs[index].offset;
299 }
300 priv->flags |= DRM_KGSL_GEM_FLAG_MAPPED;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700301
302 return 0;
303}
304
/*
 * Release an object's backing memory: invalidate CPU caches, free the
 * shared memory, drop the pagetable reference, and unlink the object
 * from the global MMU-mapped list if it was tracked there.
 */
static void
kgsl_gem_free_memory(struct drm_gem_object *obj)
{
        struct drm_kgsl_gem_object *priv = obj->driver_private;

        /* Nothing to free for unallocated or fd-backed (e.g. fb) objects */
        if (!kgsl_gem_memory_allocated(obj) || TYPE_IS_FD(priv->type))
                return;

        kgsl_gem_mem_flush(&priv->memdesc, priv->type,
                           DRM_KGSL_GEM_CACHE_OP_FROM_DEV);

        kgsl_sharedmem_free(&priv->memdesc);

        kgsl_mmu_putpagetable(priv->pagetable);
        priv->pagetable = NULL;

        /* NOTE(review): this mirrors the list_add() condition in
         * kgsl_gem_alloc_memory(); a cache-flagged object allocated via a
         * non-KMEM path would list_del() a node that was never added --
         * verify the type combinations callers can actually request */
        if ((priv->type == DRM_KGSL_GEM_TYPE_KMEM) ||
            (priv->type & DRM_KGSL_GEM_CACHE_MASK))
                list_del(&priv->list);

        priv->flags &= ~DRM_KGSL_GEM_FLAG_MAPPED;

}
328
329int
330kgsl_gem_init_object(struct drm_gem_object *obj)
331{
332 struct drm_kgsl_gem_object *priv;
333 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
334 if (priv == NULL) {
335 DRM_ERROR("Unable to create GEM object\n");
336 return -ENOMEM;
337 }
338
339 obj->driver_private = priv;
340 priv->obj = obj;
341
342 return 0;
343}
344
/*
 * GEM object destructor: release backing memory and the mmap offset
 * before letting the DRM core tear down the object, then free the
 * driver-private struct. Order matters: memory/offset teardown uses
 * obj fields that drm_gem_object_release() invalidates.
 */
void
kgsl_gem_free_object(struct drm_gem_object *obj)
{
        kgsl_gem_free_memory(obj);
        kgsl_gem_free_mmap_offset(obj);
        drm_gem_object_release(obj);
        kfree(obj->driver_private);
}
353
/*
 * Create the fake mmap offset for a GEM object: reserve a page-aligned
 * range in the DRM offset manager large enough for all buffers, hash it
 * so msm_drm_gem_mmap() can find the object, and record the byte offset
 * userspace must pass to mmap(). Returns 0 or -ENOMEM.
 */
static int
kgsl_gem_create_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_gem_mm *mm = dev->mm_private;
        struct drm_kgsl_gem_object *priv = obj->driver_private;
        struct drm_map_list *list;
        int msize;

        list = &obj->map_list;
        list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
        if (list->map == NULL) {
                DRM_ERROR("Unable to allocate drm_map_list\n");
                return -ENOMEM;
        }

        /* total mapping covers every buffer in the object */
        msize = obj->size * priv->bufcount;

        list->map->type = _DRM_GEM;
        list->map->size = msize;
        list->map->handle = obj;

        /* Allocate a mmap offset */
        list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
                                                    msize / PAGE_SIZE,
                                                    0, 0);

        if (!list->file_offset_node) {
                DRM_ERROR("Failed to allocate offset for %d\n", obj->name);
                kfree(list->map);
                return -ENOMEM;
        }

        list->file_offset_node = drm_mm_get_block(list->file_offset_node,
                                                  msize / PAGE_SIZE, 0);

        if (!list->file_offset_node) {
                DRM_ERROR("Unable to create the file_offset_node\n");
                kfree(list->map);
                return -ENOMEM;
        }

        /* hash key is the page offset userspace passes to mmap() */
        list->hash.key = list->file_offset_node->start;
        if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
                DRM_ERROR("Failed to add to map hash\n");
                drm_mm_put_block(list->file_offset_node);
                kfree(list->map);
                return -ENOMEM;
        }

        priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;

        return 0;
}
408
/*
 * Resolve a GEM handle from another driver's context (e.g. the MDP
 * display driver) into the physical address and size of its currently
 * active buffer. Takes a DRM fd + handle rather than a drm_file because
 * it is called from outside the DRM ioctl path. Only physically
 * contiguous (PMEM) objects can be exported this way. Returns 0 on
 * success, -EINVAL on bad fd/type, -EBADF on a bad handle.
 */
int
kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start,
                  unsigned long *len)
{
        struct file *filp;
        struct drm_device *dev;
        struct drm_file *file_priv;
        struct drm_gem_object *obj;
        struct drm_kgsl_gem_object *priv;
        int ret = 0;

        filp = fget(drm_fd);
        if (unlikely(filp == NULL)) {
                DRM_ERROR("Unable to get the DRM file descriptor\n");
                return -EINVAL;
        }
        file_priv = filp->private_data;
        if (unlikely(file_priv == NULL)) {
                DRM_ERROR("Unable to get the file private data\n");
                fput(filp);
                return -EINVAL;
        }
        dev = file_priv->minor->dev;
        if (unlikely(dev == NULL)) {
                DRM_ERROR("Unable to get the minor device\n");
                fput(filp);
                return -EINVAL;
        }

        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (unlikely(obj == NULL)) {
                DRM_ERROR("Invalid GEM handle %x\n", handle);
                fput(filp);
                return -EBADF;
        }

        mutex_lock(&dev->struct_mutex);
        priv = obj->driver_private;

        /* We can only use the MDP for PMEM regions */

        if (TYPE_IS_PMEM(priv->type)) {
                *start = priv->memdesc.physaddr +
                         priv->bufs[priv->active].offset;

                *len = priv->memdesc.size;

                /* Flush CPU caches so the consumer sees current data */
                kgsl_gem_mem_flush(&priv->memdesc,
                                   priv->type, DRM_KGSL_GEM_CACHE_OP_TO_DEV);
        } else {
                *start = 0;
                *len = 0;
                ret = -EINVAL;
        }

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        fput(filp);
        return ret;
}
470
/*
 * Common second-stage initialization for a freshly allocated GEM
 * object: set defaults (single EBI/PMEM buffer), create the userspace
 * handle, and initialize the per-object wait and fence entry arrays.
 * Returns the result of drm_gem_handle_create().
 */
static int
kgsl_gem_init_obj(struct drm_device *dev,
                  struct drm_file *file_priv,
                  struct drm_gem_object *obj,
                  int *handle)
{
        struct drm_kgsl_gem_object *priv;
        int ret, i;

        mutex_lock(&dev->struct_mutex);
        priv = obj->driver_private;

        memset(&priv->memdesc, 0, sizeof(priv->memdesc));
        priv->bufcount = 1;
        priv->active = 0;
        priv->bound = 0;

        /* To preserve backwards compatability, the default memory source
           is EBI */

        priv->type = DRM_KGSL_GEM_TYPE_PMEM | DRM_KGSL_GEM_PMEM_EBI;

        ret = drm_gem_handle_create(file_priv, obj, handle);

        /* Drop the allocation reference; the handle created above now
         * presumably holds the remaining reference -- note that on
         * handle-create failure this frees the object */
        drm_gem_object_unreference(obj);
        INIT_LIST_HEAD(&priv->wait_list);

        for (i = 0; i < DRM_KGSL_HANDLE_WAIT_ENTRIES; i++) {
                INIT_LIST_HEAD((struct list_head *) &priv->wait_entries[i]);
                priv->wait_entries[i].pid = 0;
                init_waitqueue_head(&priv->wait_entries[i].process_wait_q);
        }

        for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
                INIT_LIST_HEAD((struct list_head *) &priv->fence_entries[i]);
                priv->fence_entries[i].in_use = 0;
                priv->fence_entries[i].gem_obj = obj;
        }

        mutex_unlock(&dev->struct_mutex);
        return ret;
}
513
/*
 * DRM_KGSL_GEM_CREATE ioctl: allocate a new GEM object of the requested
 * (page-aligned) size and return a handle to it. No backing memory is
 * allocated yet -- that happens at ALLOC/PREP time.
 */
int
kgsl_gem_create_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct drm_kgsl_gem_create *create = data;
        struct drm_gem_object *obj;
        int ret, handle;

        /* Page align the size so we can allocate multiple buffers */
        create->size = ALIGN(create->size, 4096);

        obj = drm_gem_object_alloc(dev, create->size);

        if (obj == NULL) {
                DRM_ERROR("Unable to allocate the GEM object\n");
                return -ENOMEM;
        }

        ret = kgsl_gem_init_obj(dev, file_priv, obj, &handle);
        if (ret)
                return ret;

        create->handle = handle;
        return 0;
}
539
/*
 * DRM_KGSL_GEM_CREATE_FD ioctl: wrap an existing framebuffer device fd
 * in a GEM object so its memory can be used through the GEM interface.
 * Only /dev/fb* descriptors are accepted; the object borrows the fb's
 * physical address and takes no backing allocation of its own.
 */
int
kgsl_gem_create_fd_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_kgsl_gem_create_fd *args = data;
        struct file *file;
        dev_t rdev;
        struct fb_info *info;
        struct drm_gem_object *obj;
        struct drm_kgsl_gem_object *priv;
        int ret, put_needed, handle;

        file = fget_light(args->fd, &put_needed);

        if (file == NULL) {
                DRM_ERROR("Unable to get the file object\n");
                return -EBADF;
        }

        rdev = file->f_dentry->d_inode->i_rdev;

        /* Only framebuffer objects are supported ATM */

        if (MAJOR(rdev) != FB_MAJOR) {
                DRM_ERROR("File descriptor is not a framebuffer\n");
                ret = -EBADF;
                goto error_fput;
        }

        info = registered_fb[MINOR(rdev)];

        if (info == NULL) {
                DRM_ERROR("Framebuffer minor %d is not registered\n",
                          MINOR(rdev));
                ret = -EBADF;
                goto error_fput;
        }

        obj = drm_gem_object_alloc(dev, info->fix.smem_len);

        if (obj == NULL) {
                DRM_ERROR("Unable to allocate GEM object\n");
                ret = -ENOMEM;
                goto error_fput;
        }

        ret = kgsl_gem_init_obj(dev, file_priv, obj, &handle);

        if (ret)
                goto error_fput;

        mutex_lock(&dev->struct_mutex);

        /* Point the object at the framebuffer's physical memory */
        priv = obj->driver_private;
        priv->memdesc.physaddr = info->fix.smem_start;
        priv->type = DRM_KGSL_GEM_TYPE_FD_FBMEM;

        mutex_unlock(&dev->struct_mutex);
        args->handle = handle;

error_fput:
        fput_light(file, put_needed);

        return ret;
}
605
606int
607kgsl_gem_setmemtype_ioctl(struct drm_device *dev, void *data,
608 struct drm_file *file_priv)
609{
610 struct drm_kgsl_gem_memtype *args = data;
611 struct drm_gem_object *obj;
612 struct drm_kgsl_gem_object *priv;
613 int ret = 0;
614
615 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
616
617 if (obj == NULL) {
618 DRM_ERROR("Invalid GEM handle %x\n", args->handle);
619 return -EBADF;
620 }
621
622 mutex_lock(&dev->struct_mutex);
623 priv = obj->driver_private;
624
625 if (TYPE_IS_FD(priv->type))
626 ret = -EINVAL;
627 else {
628 if (TYPE_IS_PMEM(args->type) || TYPE_IS_MEM(args->type))
629 priv->type = args->type;
630 else
631 ret = -EINVAL;
632 }
633
634 drm_gem_object_unreference(obj);
635 mutex_unlock(&dev->struct_mutex);
636
637 return ret;
638}
639
640int
641kgsl_gem_getmemtype_ioctl(struct drm_device *dev, void *data,
642 struct drm_file *file_priv)
643{
644 struct drm_kgsl_gem_memtype *args = data;
645 struct drm_gem_object *obj;
646 struct drm_kgsl_gem_object *priv;
647
648 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
649
650 if (obj == NULL) {
651 DRM_ERROR("Invalid GEM handle %x\n", args->handle);
652 return -EBADF;
653 }
654
655 mutex_lock(&dev->struct_mutex);
656 priv = obj->driver_private;
657
658 args->type = priv->type;
659
660 drm_gem_object_unreference(obj);
661 mutex_unlock(&dev->struct_mutex);
662
663 return 0;
664}
665
/* DRM_KGSL_GEM_UNBIND_GPU ioctl: currently a no-op that reports success;
 * GPU mappings persist until the object is freed. */
int
kgsl_gem_unbind_gpu_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        return 0;
}
672
/* DRM_KGSL_GEM_BIND_GPU ioctl: currently a no-op that reports success;
 * GPU mapping happens implicitly when memory is allocated. */
int
kgsl_gem_bind_gpu_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
        return 0;
}
679
680/* Allocate the memory and prepare it for CPU mapping */
681
/*
 * DRM_KGSL_GEM_ALLOC ioctl: allocate backing memory for the object and
 * create its mmap offset (if not already present), returning the offset
 * userspace should pass to mmap().
 */
int
kgsl_gem_alloc_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        struct drm_kgsl_gem_alloc *args = data;
        struct drm_gem_object *obj;
        struct drm_kgsl_gem_object *priv;
        int ret;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);

        if (obj == NULL) {
                DRM_ERROR("Invalid GEM handle %x\n", args->handle);
                return -EBADF;
        }

        mutex_lock(&dev->struct_mutex);
        priv = obj->driver_private;

        ret = kgsl_gem_alloc_memory(obj);

        if (ret) {
                DRM_ERROR("Unable to allocate object memory\n");
        } else if (!priv->mmap_offset) {
                ret = kgsl_gem_create_mmap_offset(obj);
                if (ret)
                        DRM_ERROR("Unable to create a mmap offset\n");
        }

        /* Reported even on failure; 0 means "no offset" */
        args->offset = priv->mmap_offset;

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
718
/*
 * DRM_KGSL_GEM_MMAP ioctl: map the object into the caller's address
 * space on its behalf, using the fake offset established at alloc time,
 * and return the resulting user virtual address.
 */
int
kgsl_gem_mmap_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_kgsl_gem_mmap *args = data;
        struct drm_gem_object *obj;
        unsigned long addr;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);

        if (obj == NULL) {
                DRM_ERROR("Invalid GEM handle %x\n", args->handle);
                return -EBADF;
        }

        down_write(&current->mm->mmap_sem);

        /* routes through msm_drm_gem_mmap() via the fops mmap hook */
        addr = do_mmap(obj->filp, 0, args->size,
                       PROT_READ | PROT_WRITE, MAP_SHARED,
                       args->offset);

        up_write(&current->mm->mmap_sem);

        mutex_lock(&dev->struct_mutex);
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        /* on failure addr holds a -errno; the implicit narrowing to int
         * is safe for the error range */
        if (IS_ERR((void *) addr))
                return addr;

        args->hostptr = (uint32_t) addr;
        return 0;
}
752
753/* This function is deprecated */
754
/*
 * DRM_KGSL_GEM_PREP ioctl (deprecated): allocate backing memory and the
 * mmap offset, and additionally expose the physical address to
 * userspace. Superseded by the ALLOC ioctl, which does not leak the
 * physical address.
 */
int
kgsl_gem_prep_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_kgsl_gem_prep *args = data;
        struct drm_gem_object *obj;
        struct drm_kgsl_gem_object *priv;
        int ret;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);

        if (obj == NULL) {
                DRM_ERROR("Invalid GEM handle %x\n", args->handle);
                return -EBADF;
        }

        mutex_lock(&dev->struct_mutex);
        priv = obj->driver_private;

        ret = kgsl_gem_alloc_memory(obj);
        if (ret) {
                DRM_ERROR("Unable to allocate object memory\n");
                drm_gem_object_unreference(obj);
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        if (priv->mmap_offset == 0) {
                ret = kgsl_gem_create_mmap_offset(obj);
                if (ret) {
                        drm_gem_object_unreference(obj);
                        mutex_unlock(&dev->struct_mutex);
                        return ret;
                }
        }

        args->offset = priv->mmap_offset;
        args->phys = priv->memdesc.physaddr;

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}
799
/*
 * DRM_KGSL_GEM_GET_BUFINFO ioctl: report the per-buffer offsets and GPU
 * addresses, the buffer count, and the active buffer index for an
 * allocated object. Fails with -EINVAL if memory was never allocated.
 */
int
kgsl_gem_get_bufinfo_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_kgsl_gem_bufinfo *args = data;
        struct drm_gem_object *obj;
        struct drm_kgsl_gem_object *priv;
        int ret = -EINVAL;
        int index;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);

        if (obj == NULL) {
                DRM_ERROR("Invalid GEM handle %x\n", args->handle);
                return -EBADF;
        }

        mutex_lock(&dev->struct_mutex);
        priv = obj->driver_private;

        if (!kgsl_gem_memory_allocated(obj)) {
                DRM_ERROR("Memory not allocated for this object\n");
                goto out;
        }

        for (index = 0; index < priv->bufcount; index++) {
                args->offset[index] = priv->bufs[index].offset;
                args->gpuaddr[index] = priv->bufs[index].gpuaddr;
        }

        args->count = priv->bufcount;
        args->active = priv->active;

        ret = 0;

out:
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
841
842int
843kgsl_gem_set_bufcount_ioctl(struct drm_device *dev, void *data,
844 struct drm_file *file_priv)
845{
846 struct drm_kgsl_gem_bufcount *args = data;
847 struct drm_gem_object *obj;
848 struct drm_kgsl_gem_object *priv;
849 int ret = -EINVAL;
850
851 if (args->bufcount < 1 || args->bufcount > DRM_KGSL_GEM_MAX_BUFFERS)
852 return -EINVAL;
853
854 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
855
856 if (obj == NULL) {
857 DRM_ERROR("Invalid GEM handle %x\n", args->handle);
858 return -EBADF;
859 }
860
861 mutex_lock(&dev->struct_mutex);
862 priv = obj->driver_private;
863
864 /* It is too much math to worry about what happens if we are already
865 allocated, so just bail if we are */
866
867 if (kgsl_gem_memory_allocated(obj)) {
868 DRM_ERROR("Memory already allocated - cannot change"
869 "number of buffers\n");
870 goto out;
871 }
872
873 priv->bufcount = args->bufcount;
874 ret = 0;
875
876out:
877 drm_gem_object_unreference(obj);
878 mutex_unlock(&dev->struct_mutex);
879
880 return ret;
881}
882
883int
884kgsl_gem_set_active_ioctl(struct drm_device *dev, void *data,
885 struct drm_file *file_priv)
886{
887 struct drm_kgsl_gem_active *args = data;
888 struct drm_gem_object *obj;
889 struct drm_kgsl_gem_object *priv;
890 int ret = -EINVAL;
891
892 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
893
894 if (obj == NULL) {
895 DRM_ERROR("Invalid GEM handle %x\n", args->handle);
896 return -EBADF;
897 }
898
899 mutex_lock(&dev->struct_mutex);
900 priv = obj->driver_private;
901
902 if (args->active < 0 || args->active >= priv->bufcount) {
903 DRM_ERROR("Invalid active buffer %d\n", args->active);
904 goto out;
905 }
906
907 priv->active = args->active;
908 ret = 0;
909
910out:
911 drm_gem_object_unreference(obj);
912 mutex_unlock(&dev->struct_mutex);
913
914 return ret;
915}
916
/*
 * Page-fault handler for vmalloc-backed (KMEM) GEM mappings: translate
 * the faulting user address into the corresponding vmalloc page and
 * hand it to the VM with an extra reference.
 */
int kgsl_gem_kmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;
        struct drm_kgsl_gem_object *priv;
        unsigned long offset, pg;
        struct page *page;

        mutex_lock(&dev->struct_mutex);

        priv = obj->driver_private;

        /* byte offset into the mapping -> kernel virtual address */
        offset = (unsigned long) vmf->virtual_address - vma->vm_start;
        pg = (unsigned long) priv->memdesc.hostptr + offset;

        page = vmalloc_to_page((void *) pg);
        if (!page) {
                mutex_unlock(&dev->struct_mutex);
                return VM_FAULT_SIGBUS;
        }

        /* VM drops this reference when the PTE is torn down */
        get_page(page);
        vmf->page = page;

        mutex_unlock(&dev->struct_mutex);
        return 0;
}
944
/*
 * Page-fault handler for physically contiguous (PMEM/FB) GEM mappings:
 * insert the page frame directly since there is no struct page backing
 * (VM_PFNMAP vma).
 */
int kgsl_gem_phys_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;
        struct drm_kgsl_gem_object *priv;
        unsigned long offset, pfn;
        int ret = 0;

        /* page offset of the fault within the mapping */
        offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >>
                  PAGE_SHIFT;

        mutex_lock(&dev->struct_mutex);

        priv = obj->driver_private;

        pfn = (priv->memdesc.physaddr >> PAGE_SHIFT) + offset;
        ret = vm_insert_pfn(vma,
                            (unsigned long) vmf->virtual_address, pfn);
        mutex_unlock(&dev->struct_mutex);

        /* map vm_insert_pfn errors onto VM fault codes */
        switch (ret) {
        case -ENOMEM:
        case -EAGAIN:
                return VM_FAULT_OOM;
        case -EFAULT:
                return VM_FAULT_SIGBUS;
        default:
                return VM_FAULT_NOPAGE;
        }
}
975
/* VM operations for vmalloc-backed (KMEM) GEM mappings */
static struct vm_operations_struct kgsl_gem_kmem_vm_ops = {
        .fault = kgsl_gem_kmem_fault,
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};

/* VM operations for physically contiguous (PMEM/FB) GEM mappings */
static struct vm_operations_struct kgsl_gem_phys_vm_ops = {
        .fault = kgsl_gem_phys_fault,
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};
987
988/* This is a clone of the standard drm_gem_mmap function modified to allow
989 us to properly map KMEM regions as well as the PMEM regions */
990
/*
 * mmap entry point for KGSL GEM objects. Clone of the stock
 * drm_gem_mmap() extended to (a) pick fault handlers per memory type
 * (struct-page-backed KMEM vs PFN-mapped PMEM/FB) and (b) honor the
 * object's requested caching policy. Falls back to legacy drm_mmap()
 * for offsets that are not GEM objects.
 */
int msm_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_gem_mm *mm = dev->mm_private;
        struct drm_local_map *map = NULL;
        struct drm_gem_object *obj;
        struct drm_hash_item *hash;
        struct drm_kgsl_gem_object *gpriv;
        int ret = 0;

        mutex_lock(&dev->struct_mutex);

        /* Not one of our fake offsets -> legacy DRM mapping path */
        if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
                mutex_unlock(&dev->struct_mutex);
                return drm_mmap(filp, vma);
        }

        map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
        if (!map ||
            ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
                ret = -EPERM;
                goto out_unlock;
        }

        /* Check for valid size. */
        if (map->size < vma->vm_end - vma->vm_start) {
                ret = -EINVAL;
                goto out_unlock;
        }

        obj = map->handle;

        gpriv = obj->driver_private;

        /* VM_PFNMAP is only for memory that doesn't use struct page
         * in other words, not "normal" memory. If you try to use it
         * with "normal" memory then the mappings don't get flushed. */

        if (TYPE_IS_MEM(gpriv->type)) {
                vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
                vma->vm_ops = &kgsl_gem_kmem_vm_ops;
        } else {
                vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP |
                                 VM_DONTEXPAND;
                vma->vm_ops = &kgsl_gem_phys_vm_ops;
        }

        vma->vm_private_data = map->handle;


        /* Take care of requested caching policy */
        if (gpriv->type == DRM_KGSL_GEM_TYPE_KMEM ||
            gpriv->type & DRM_KGSL_GEM_CACHE_MASK) {
                if (gpriv->type & DRM_KGSL_GEM_CACHE_WBACKWA)
                        vma->vm_page_prot =
                                pgprot_writebackwacache(vma->vm_page_prot);
                else if (gpriv->type & DRM_KGSL_GEM_CACHE_WBACK)
                        vma->vm_page_prot =
                                pgprot_writebackcache(vma->vm_page_prot);
                else if (gpriv->type & DRM_KGSL_GEM_CACHE_WTHROUGH)
                        vma->vm_page_prot =
                                pgprot_writethroughcache(vma->vm_page_prot);
                else
                        vma->vm_page_prot =
                                pgprot_writecombine(vma->vm_page_prot);
        } else {
                if (gpriv->type == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE)
                        vma->vm_page_prot =
                                pgprot_noncached(vma->vm_page_prot);
                else
                        /* default pmem is WC */
                        vma->vm_page_prot =
                                pgprot_writecombine(vma->vm_page_prot);
        }

        /* flush out existing KMEM cached mappings if new ones are
         * of uncached type */
        if (IS_MEM_UNCACHED(gpriv->type))
                kgsl_cache_range_op(&gpriv->memdesc,
                                    KGSL_CACHE_OP_FLUSH);

        /* Add the other memory types here */

        /* Take a ref for this mapping of the object, so that the fault
         * handler can dereference the mmap offset's pointer to the object.
         * This reference is cleaned up by the corresponding vm_close
         * (which should happen whether the vma was created by this call, or
         * by a vm_open due to mremap or partial unmap or whatever).
         */
        drm_gem_object_reference(obj);

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        drm_vm_open_locked(vma);

out_unlock:
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
1091
/*
 * Release every buffer attached to a fence and reset the fence slot to
 * ENTRY_EMPTY. When check_waiting is set, also wake the next process
 * blocked on each released object's wait list (lock hand-off).
 * Dropping the GEM references here can free the underlying memory.
 */
void
cleanup_fence(struct drm_kgsl_gem_object_fence *fence, int check_waiting)
{
        int j;
        struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
        struct drm_kgsl_gem_object *unlock_obj;
        struct drm_gem_object *obj;
        struct drm_kgsl_gem_object_wait_list_entry *lock_next;

        fence->ts_valid = 0;
        fence->timestamp = -1;
        fence->ts_device = -1;

        /* Walk the list of buffers in this fence and clean up the */
        /* references. Note that this can cause memory allocations */
        /* to be freed */
        for (j = fence->num_buffers; j > 0; j--) {
                /* pop entries from the tail of the list */
                this_fence_entry =
                        (struct drm_kgsl_gem_object_fence_list_entry *)
                        fence->buffers_in_fence.prev;

                this_fence_entry->in_use = 0;
                obj = this_fence_entry->gem_obj;
                unlock_obj = obj->driver_private;

                /* Delete it from the list */

                list_del(&this_fence_entry->list);

                /* we are unlocking - see if there are other pids waiting */
                if (check_waiting) {
                        if (!list_empty(&unlock_obj->wait_list)) {
                                lock_next =
                                        (struct drm_kgsl_gem_object_wait_list_entry *)
                                        unlock_obj->wait_list.prev;

                                list_del((struct list_head *)&lock_next->list);

                                unlock_obj->lockpid = 0;
                                wake_up_interruptible(
                                        &lock_next->process_wait_q);
                                lock_next->pid = 0;

                        } else {
                                /* List is empty so set pid to 0 */
                                unlock_obj->lockpid = 0;
                        }
                }

                drm_gem_object_unreference(obj);
        }
        /* here all the buffers in the fence are released */
        /* clear the fence entry */
        fence->fence_id = ENTRY_EMPTY;
}
1147
1148int
1149find_empty_fence(void)
1150{
1151 int i;
1152
1153 for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
1154 if (gem_buf_fence[i].fence_id == ENTRY_EMPTY) {
1155 gem_buf_fence[i].fence_id = fence_id++;
1156 gem_buf_fence[i].ts_valid = 0;
1157 INIT_LIST_HEAD(&(gem_buf_fence[i].buffers_in_fence));
1158 if (fence_id == 0xFFFFFFF0)
1159 fence_id = 1;
1160 return i;
1161 } else {
1162
1163 /* Look for entries to be cleaned up */
1164 if (gem_buf_fence[i].fence_id == ENTRY_NEEDS_CLEANUP)
1165 cleanup_fence(&gem_buf_fence[i], 0);
1166 }
1167 }
1168
1169 return ENTRY_EMPTY;
1170}
1171
1172int
1173find_fence(int index)
1174{
1175 int i;
1176
1177 for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
1178 if (gem_buf_fence[i].fence_id == index)
1179 return i;
1180 }
1181
1182 return ENTRY_EMPTY;
1183}
1184
/*
 * wakeup_fence_entries - release waiters once a fence's timestamp expires.
 *
 * For every GEM object attached to @fence, wake the waiter at the tail of
 * the object's wait list (or just clear the lock owner if nobody waits).
 * The fence itself is not torn down here: it is only flagged
 * ENTRY_NEEDS_CLEANUP so a later cleanup_fence() pass (triggered from
 * find_empty_fence()) drops the buffer references.
 */
void
wakeup_fence_entries(struct drm_kgsl_gem_object_fence *fence)
{
	struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
	struct drm_kgsl_gem_object_wait_list_entry *lock_next;
	struct drm_kgsl_gem_object *unlock_obj;
	struct drm_gem_object *obj;

	/* TS has expired when we get here */
	fence->ts_valid = 0;
	fence->timestamp = -1;
	fence->ts_device = -1;

	list_for_each_entry(this_fence_entry, &fence->buffers_in_fence, list) {
		obj = this_fence_entry->gem_obj;
		unlock_obj = obj->driver_private;

		if (!list_empty(&unlock_obj->wait_list)) {
			/* Pick the entry at the tail of the wait list */
			lock_next =
				(struct drm_kgsl_gem_object_wait_list_entry *)
				unlock_obj->wait_list.prev;

			/* Unblock the pid */
			lock_next->pid = 0;

			/* Delete it from the list */
			list_del((struct list_head *)&lock_next->list);

			/* Clear the owner before waking so the woken thread
			 * sees the lock as free */
			unlock_obj->lockpid = 0;
			wake_up_interruptible(&lock_next->process_wait_q);

		} else {
			/* List is empty so set pid to 0 */
			unlock_obj->lockpid = 0;
		}
	}
	fence->fence_id = ENTRY_NEEDS_CLEANUP; /* Mark it as needing cleanup */
}
1223
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001224int
1225kgsl_gem_lock_handle_ioctl(struct drm_device *dev, void *data,
1226 struct drm_file *file_priv)
1227{
1228 /* The purpose of this function is to lock a given set of handles. */
1229 /* The driver will maintain a list of locked handles. */
1230 /* If a request comes in for a handle that's locked the thread will */
1231 /* block until it's no longer in use. */
1232
1233 struct drm_kgsl_gem_lock_handles *args = data;
1234 struct drm_gem_object *obj;
1235 struct drm_kgsl_gem_object *priv;
1236 struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
1237 struct drm_kgsl_gem_object_fence *fence;
1238 struct drm_kgsl_gem_object_wait_list_entry *lock_item;
1239 int i, j;
1240 int result = 0;
1241 uint32_t *lock_list;
1242 uint32_t *work_list = NULL;
1243 int32_t fence_index;
1244
1245 /* copy in the data from user space */
1246 lock_list = kzalloc(sizeof(uint32_t) * args->num_handles, GFP_KERNEL);
1247 if (!lock_list) {
1248 DRM_ERROR("Unable allocate memory for lock list\n");
1249 result = -ENOMEM;
1250 goto error;
1251 }
1252
1253 if (copy_from_user(lock_list, args->handle_list,
1254 sizeof(uint32_t) * args->num_handles)) {
1255 DRM_ERROR("Unable to copy the lock list from the user\n");
1256 result = -EFAULT;
1257 goto free_handle_list;
1258 }
1259
1260
1261 work_list = lock_list;
1262 mutex_lock(&dev->struct_mutex);
1263
1264 /* build the fence for this group of handles */
1265 fence_index = find_empty_fence();
1266 if (fence_index == ENTRY_EMPTY) {
1267 DRM_ERROR("Unable to find a empty fence\n");
1268 args->lock_id = 0xDEADBEEF;
1269 result = -EFAULT;
1270 goto out_unlock;
1271 }
1272
1273 fence = &gem_buf_fence[fence_index];
1274 gem_buf_fence[fence_index].num_buffers = args->num_handles;
1275 args->lock_id = gem_buf_fence[fence_index].fence_id;
1276
1277 for (j = args->num_handles; j > 0; j--, lock_list++) {
1278 obj = drm_gem_object_lookup(dev, file_priv, *lock_list);
1279
1280 if (obj == NULL) {
1281 DRM_ERROR("Invalid GEM handle %x\n", *lock_list);
1282 result = -EBADF;
1283 goto out_unlock;
1284 }
1285
1286 priv = obj->driver_private;
1287 this_fence_entry = NULL;
1288
1289 /* get a fence entry to hook into the fence */
1290 for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
1291 if (!priv->fence_entries[i].in_use) {
1292 this_fence_entry = &priv->fence_entries[i];
1293 this_fence_entry->in_use = 1;
1294 break;
1295 }
1296 }
1297
1298 if (this_fence_entry == NULL) {
1299 fence->num_buffers = 0;
1300 fence->fence_id = ENTRY_EMPTY;
1301 args->lock_id = 0xDEADBEAD;
1302 result = -EFAULT;
1303 drm_gem_object_unreference(obj);
1304 goto out_unlock;
1305 }
1306
1307 /* We're trying to lock - add to a fence */
1308 list_add((struct list_head *)this_fence_entry,
1309 &gem_buf_fence[fence_index].buffers_in_fence);
1310 if (priv->lockpid) {
1311
1312 if (priv->lockpid == args->pid) {
1313 /* now that things are running async this */
1314 /* happens when an op isn't done */
1315 /* so it's already locked by the calling pid */
1316 continue;
1317 }
1318
1319
1320 /* if a pid already had it locked */
1321 /* create and add to wait list */
1322 for (i = 0; i < DRM_KGSL_HANDLE_WAIT_ENTRIES; i++) {
1323 if (priv->wait_entries[i].in_use == 0) {
1324 /* this one is empty */
1325 lock_item = &priv->wait_entries[i];
1326 lock_item->in_use = 1;
1327 lock_item->pid = args->pid;
1328 INIT_LIST_HEAD((struct list_head *)
1329 &priv->wait_entries[i]);
1330 break;
1331 }
1332 }
1333
1334 if (i == DRM_KGSL_HANDLE_WAIT_ENTRIES) {
1335
1336 result = -EFAULT;
1337 drm_gem_object_unreference(obj);
1338 goto out_unlock;
1339 }
1340
1341 list_add_tail((struct list_head *)&lock_item->list,
1342 &priv->wait_list);
1343 mutex_unlock(&dev->struct_mutex);
1344 /* here we need to block */
1345 wait_event_interruptible_timeout(
1346 priv->wait_entries[i].process_wait_q,
1347 (priv->lockpid == 0),
1348 msecs_to_jiffies(64));
1349 mutex_lock(&dev->struct_mutex);
1350 lock_item->in_use = 0;
1351 }
1352
1353 /* Getting here means no one currently holds the lock */
1354 priv->lockpid = args->pid;
1355
1356 args->lock_id = gem_buf_fence[fence_index].fence_id;
1357 }
1358 fence->lockpid = args->pid;
1359
1360out_unlock:
1361 mutex_unlock(&dev->struct_mutex);
1362
1363free_handle_list:
1364 kfree(work_list);
1365
1366error:
1367 return result;
1368}
1369
1370int
1371kgsl_gem_unlock_handle_ioctl(struct drm_device *dev, void *data,
1372 struct drm_file *file_priv)
1373{
1374 struct drm_kgsl_gem_unlock_handles *args = data;
1375 int result = 0;
1376 int32_t fence_index;
1377
1378 mutex_lock(&dev->struct_mutex);
1379 fence_index = find_fence(args->lock_id);
1380 if (fence_index == ENTRY_EMPTY) {
1381 DRM_ERROR("Invalid lock ID: %x\n", args->lock_id);
1382 result = -EFAULT;
1383 goto out_unlock;
1384 }
1385
1386 cleanup_fence(&gem_buf_fence[fence_index], 1);
1387
1388out_unlock:
1389 mutex_unlock(&dev->struct_mutex);
1390
1391 return result;
1392}
1393
1394
1395int
1396kgsl_gem_unlock_on_ts_ioctl(struct drm_device *dev, void *data,
1397 struct drm_file *file_priv)
1398{
1399 struct drm_kgsl_gem_unlock_on_ts *args = data;
1400 int result = 0;
1401 int ts_done = 0;
1402 int32_t fence_index, ts_device;
1403 struct drm_kgsl_gem_object_fence *fence;
1404 struct kgsl_device *device;
1405
1406 if (args->type == DRM_KGSL_GEM_TS_3D)
1407 ts_device = KGSL_DEVICE_3D0;
1408 else if (args->type == DRM_KGSL_GEM_TS_2D)
1409 ts_device = KGSL_DEVICE_2D0;
1410 else {
1411 result = -EINVAL;
1412 goto error;
1413 }
1414
1415 device = kgsl_get_device(ts_device);
1416 ts_done = kgsl_check_timestamp(device, args->timestamp);
1417
1418 mutex_lock(&dev->struct_mutex);
1419
1420 fence_index = find_fence(args->lock_id);
1421 if (fence_index == ENTRY_EMPTY) {
1422 DRM_ERROR("Invalid lock ID: %x\n", args->lock_id);
1423 result = -EFAULT;
1424 goto out_unlock;
1425 }
1426
1427 fence = &gem_buf_fence[fence_index];
1428 fence->ts_device = ts_device;
1429
1430 if (!ts_done)
1431 fence->ts_valid = 1;
1432 else
1433 cleanup_fence(fence, 1);
1434
1435
1436out_unlock:
1437 mutex_unlock(&dev->struct_mutex);
1438
1439error:
1440 return result;
1441}
1442
/* Dispatch table mapping DRM_KGSL_GEM_* ioctl numbers to their handlers.
 * All entries are unprivileged except CREATE_FD, which requires the DRM
 * master. */
struct drm_ioctl_desc kgsl_drm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(KGSL_GEM_CREATE, kgsl_gem_create_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_PREP, kgsl_gem_prep_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_SETMEMTYPE, kgsl_gem_setmemtype_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_GETMEMTYPE, kgsl_gem_getmemtype_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_BIND_GPU, kgsl_gem_bind_gpu_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_UNBIND_GPU, kgsl_gem_unbind_gpu_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_ALLOC, kgsl_gem_alloc_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_MMAP, kgsl_gem_mmap_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_GET_BUFINFO, kgsl_gem_get_bufinfo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_SET_BUFCOUNT,
			  kgsl_gem_set_bufcount_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_SET_ACTIVE, kgsl_gem_set_active_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_LOCK_HANDLE,
			  kgsl_gem_lock_handle_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_UNLOCK_HANDLE,
			  kgsl_gem_unlock_handle_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_UNLOCK_ON_TS,
			  kgsl_gem_unlock_on_ts_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_CREATE_FD, kgsl_gem_create_fd_ioctl,
			  DRM_MASTER),
};
1465
/* DRM driver descriptor for the KGSL GEM driver; registered with the DRM
 * core by kgsl_drm_init() via drm_platform_init(). */
static struct drm_driver driver = {
	.driver_features = DRIVER_GEM,
	.load = kgsl_drm_load,
	.unload = kgsl_drm_unload,
	.preclose = kgsl_drm_preclose,
	.suspend = kgsl_drm_suspend,
	.resume = kgsl_drm_resume,
	.reclaim_buffers = drm_core_reclaim_buffers,
	.gem_init_object = kgsl_gem_init_object,
	.gem_free_object = kgsl_gem_free_object,
	.ioctls = kgsl_drm_ioctls,
	/* num_ioctls is filled in at init time by kgsl_drm_init() */

	.fops = {
		 .owner = THIS_MODULE,
		 .open = drm_open,
		 .release = drm_release,
		 .unlocked_ioctl = drm_ioctl,
		 .mmap = msm_drm_gem_mmap,
		 .poll = drm_poll,
		 .fasync = drm_fasync,
		 },

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
1495
1496int kgsl_drm_init(struct platform_device *dev)
1497{
1498 int i;
1499
Michael Street8bacdd02012-01-05 14:55:01 -08001500 /* Only initialize once */
1501 if (kgsl_drm_inited == DRM_KGSL_INITED)
1502 return 0;
1503
1504 kgsl_drm_inited = DRM_KGSL_INITED;
1505
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001506 driver.num_ioctls = DRM_ARRAY_SIZE(kgsl_drm_ioctls);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001507
1508 INIT_LIST_HEAD(&kgsl_mem_list);
1509
1510 for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
1511 gem_buf_fence[i].num_buffers = 0;
1512 gem_buf_fence[i].ts_valid = 0;
1513 gem_buf_fence[i].fence_id = ENTRY_EMPTY;
1514 }
1515
Michael Street8bacdd02012-01-05 14:55:01 -08001516 return drm_platform_init(&driver, dev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001517}
1518
/* Unregister the KGSL DRM driver and allow kgsl_drm_init() to run again. */
void kgsl_drm_exit(void)
{
	/* Clear the flag first so a concurrent/subsequent init can proceed */
	kgsl_drm_inited = DRM_KGSL_NOT_INITED;
	drm_platform_exit(&driver, driver.kdriver.platform_device);
}