/* Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/* Implements an interface between KGSL and the DRM subsystem. For now this
 * is pretty simple, but it will take on more of the workload as time goes
 * on
 */
#include "drmP.h"
#include "drm.h"
#include <linux/android_pmem.h>

#include "kgsl.h"
#include "kgsl_device.h"
#include "kgsl_drm.h"
#include "kgsl_mmu.h"
#include "kgsl_sharedmem.h"

#define DRIVER_AUTHOR           "Qualcomm"
#define DRIVER_NAME             "kgsl"
#define DRIVER_DESC             "KGSL DRM"
#define DRIVER_DATE             "20100127"

#define DRIVER_MAJOR            2
#define DRIVER_MINOR            1
#define DRIVER_PATCHLEVEL       1

#define DRM_KGSL_GEM_FLAG_MAPPED (1 << 0)

#define ENTRY_EMPTY -1
#define ENTRY_NEEDS_CLEANUP -2

#define DRM_KGSL_NOT_INITED -1
#define DRM_KGSL_INITED 1

#define DRM_KGSL_NUM_FENCE_ENTRIES (DRM_KGSL_HANDLE_WAIT_ENTRIES << 2)
#define DRM_KGSL_HANDLE_WAIT_ENTRIES 5

/* Returns true if the memory type is in PMEM */

#ifdef CONFIG_KERNEL_PMEM_SMI_REGION
#define TYPE_IS_PMEM(_t) \
  (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_EBI) || \
   ((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_SMI) || \
   ((_t) & DRM_KGSL_GEM_TYPE_PMEM))
#else
#define TYPE_IS_PMEM(_t) \
  (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_EBI) || \
   ((_t) & (DRM_KGSL_GEM_TYPE_PMEM | DRM_KGSL_GEM_PMEM_EBI)))
#endif

/* Returns true if the memory type is regular */

#define TYPE_IS_MEM(_t) \
  (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_KMEM) || \
   ((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE) || \
   ((_t) & DRM_KGSL_GEM_TYPE_MEM))

#define TYPE_IS_FD(_t) ((_t) & DRM_KGSL_GEM_TYPE_FD_MASK)

/* Returns true if KMEM region is uncached */

#define IS_MEM_UNCACHED(_t) \
  ((_t == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE) || \
   (_t == DRM_KGSL_GEM_TYPE_KMEM) || \
   (TYPE_IS_MEM(_t) && (_t & DRM_KGSL_GEM_CACHE_WCOMBINE)))

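/* Per-process wait slot: each GEM object keeps a small fixed array of these
 * so a process blocking on a locked handle has a wait queue to sleep on
 * without allocating at lock time.
 */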
struct drm_kgsl_gem_object_wait_list_entry {
	struct list_head list;
	int pid;
	int in_use;
	wait_queue_head_t process_wait_q;
};

struct drm_kgsl_gem_object_fence {
	int32_t fence_id;
	unsigned int num_buffers;
	int ts_valid;
	unsigned int timestamp;
	int ts_device;
	int lockpid;
	struct list_head buffers_in_fence;
};

struct drm_kgsl_gem_object_fence_list_entry {
	struct list_head list;
	int in_use;
	struct drm_gem_object *gem_obj;
};

static int32_t fence_id = 0x1;

static struct drm_kgsl_gem_object_fence
	gem_buf_fence[DRM_KGSL_NUM_FENCE_ENTRIES];

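/* Driver-private state attached to each KGSL GEM object: the backing
 * kgsl_memdesc, the GPU pagetable it is mapped into, per-buffer offsets and
 * GPU addresses, and the preallocated wait/fence entries used by the handle
 * locking ioctls.
 */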
struct drm_kgsl_gem_object {
	struct drm_gem_object *obj;
	uint32_t type;
	struct kgsl_memdesc memdesc;
	struct kgsl_pagetable *pagetable;
	uint64_t mmap_offset;
	int bufcount;
	int flags;
	struct list_head list;
	int active;

	struct {
		uint32_t offset;
		uint32_t gpuaddr;
	} bufs[DRM_KGSL_GEM_MAX_BUFFERS];

	int bound;
	int lockpid;
	/* Put these here to avoid allocating all the time */
	struct drm_kgsl_gem_object_wait_list_entry
		wait_entries[DRM_KGSL_HANDLE_WAIT_ENTRIES];
	/* Each object can only appear in a single fence */
	struct drm_kgsl_gem_object_fence_list_entry
		fence_entries[DRM_KGSL_NUM_FENCE_ENTRIES];

	struct list_head wait_list;
};

static int kgsl_drm_inited = DRM_KGSL_NOT_INITED;

/* This is a global list of all the memory currently mapped in the MMU */
static struct list_head kgsl_mem_list;

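/* Translate a transfer direction into a KGSL cache operation for cacheable
 * object types: clean (writeback) before handing memory to the device,
 * invalidate when taking it back from the device.
 */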
static void kgsl_gem_mem_flush(struct kgsl_memdesc *memdesc, int type, int op)
{
	int cacheop = 0;

	switch (op) {
	case DRM_KGSL_GEM_CACHE_OP_TO_DEV:
		if (type & (DRM_KGSL_GEM_CACHE_WBACK |
			    DRM_KGSL_GEM_CACHE_WBACKWA))
			cacheop = KGSL_CACHE_OP_CLEAN;

		break;

	case DRM_KGSL_GEM_CACHE_OP_FROM_DEV:
		if (type & (DRM_KGSL_GEM_CACHE_WBACK |
			    DRM_KGSL_GEM_CACHE_WBACKWA |
			    DRM_KGSL_GEM_CACHE_WTHROUGH))
			cacheop = KGSL_CACHE_OP_INV;
	}

	kgsl_cache_range_op(memdesc, cacheop);
}

/* TODO:
 * Add vsync wait */

static int kgsl_drm_load(struct drm_device *dev, unsigned long flags)
{
	return 0;
}

static int kgsl_drm_unload(struct drm_device *dev)
{
	return 0;
}

struct kgsl_drm_device_priv {
	struct kgsl_device *device[KGSL_DEVICE_MAX];
	struct kgsl_device_private *devpriv[KGSL_DEVICE_MAX];
};

void kgsl_drm_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
}

static int kgsl_drm_suspend(struct drm_device *dev, pm_message_t state)
{
	return 0;
}

static int kgsl_drm_resume(struct drm_device *dev)
{
	return 0;
}

static void
kgsl_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_kgsl_gem_object *priv = obj->driver_private;
	struct drm_map_list *list;

	list = &obj->map_list;
	drm_ht_remove_item(&mm->offset_hash, &list->hash);
	if (list->file_offset_node) {
		drm_mm_put_block(list->file_offset_node);
		list->file_offset_node = NULL;
	}

	kfree(list->map);
	list->map = NULL;

	priv->mmap_offset = 0;
}

static int
kgsl_gem_memory_allocated(struct drm_gem_object *obj)
{
	struct drm_kgsl_gem_object *priv = obj->driver_private;
	return priv->memdesc.size ? 1 : 0;
}

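/* Back an object with memory on first use. PMEM/EBI objects are allocated
 * from the contiguous EBI pool and KMEM objects from vmalloc; a single
 * allocation of obj->size * bufcount is made and the individual buffers are
 * carved out of it at fixed offsets.
 */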
static int
kgsl_gem_alloc_memory(struct drm_gem_object *obj)
{
	struct drm_kgsl_gem_object *priv = obj->driver_private;
	int index;
	int result = 0;

	/* Return if the memory is already allocated */

	if (kgsl_gem_memory_allocated(obj) || TYPE_IS_FD(priv->type))
		return 0;

	if (priv->pagetable == NULL) {
		priv->pagetable = kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);

		if (priv->pagetable == NULL) {
			DRM_ERROR("Unable to get the GPU MMU pagetable\n");
			return -EINVAL;
		}
	}

	if (TYPE_IS_PMEM(priv->type)) {
		int type;

		if (priv->type == DRM_KGSL_GEM_TYPE_EBI ||
		    priv->type & DRM_KGSL_GEM_PMEM_EBI) {
			type = PMEM_MEMTYPE_EBI1;
			result = kgsl_sharedmem_ebimem_user(
					&priv->memdesc,
					priv->pagetable,
					obj->size * priv->bufcount,
					0);
			if (result) {
				DRM_ERROR(
				"Unable to allocate PMEM memory\n");
				return result;
			}
		} else
			return -EINVAL;

	} else if (TYPE_IS_MEM(priv->type)) {

		if (priv->type == DRM_KGSL_GEM_TYPE_KMEM ||
		    priv->type & DRM_KGSL_GEM_CACHE_MASK)
			list_add(&priv->list, &kgsl_mem_list);

		result = kgsl_sharedmem_vmalloc_user(&priv->memdesc,
					priv->pagetable,
					obj->size * priv->bufcount, 0);

		if (result != 0) {
			DRM_ERROR(
			"Unable to allocate Vmalloc user memory\n");
			return result;
		}
	} else
		return -EINVAL;

	for (index = 0; index < priv->bufcount; index++) {
		priv->bufs[index].offset = index * obj->size;
		priv->bufs[index].gpuaddr =
			priv->memdesc.gpuaddr +
			priv->bufs[index].offset;
	}
	priv->flags |= DRM_KGSL_GEM_FLAG_MAPPED;

	return 0;
}

static void
kgsl_gem_free_memory(struct drm_gem_object *obj)
{
	struct drm_kgsl_gem_object *priv = obj->driver_private;

	if (!kgsl_gem_memory_allocated(obj) || TYPE_IS_FD(priv->type))
		return;

	kgsl_gem_mem_flush(&priv->memdesc, priv->type,
			   DRM_KGSL_GEM_CACHE_OP_FROM_DEV);

	kgsl_sharedmem_free(&priv->memdesc);

	kgsl_mmu_putpagetable(priv->pagetable);
	priv->pagetable = NULL;

	if ((priv->type == DRM_KGSL_GEM_TYPE_KMEM) ||
	    (priv->type & DRM_KGSL_GEM_CACHE_MASK))
		list_del(&priv->list);

	priv->flags &= ~DRM_KGSL_GEM_FLAG_MAPPED;

}

int
kgsl_gem_init_object(struct drm_gem_object *obj)
{
	struct drm_kgsl_gem_object *priv;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv == NULL) {
		DRM_ERROR("Unable to create GEM object\n");
		return -ENOMEM;
	}

	obj->driver_private = priv;
	priv->obj = obj;

	return 0;
}

void
kgsl_gem_free_object(struct drm_gem_object *obj)
{
	kgsl_gem_free_memory(obj);
	kgsl_gem_free_mmap_offset(obj);
	drm_gem_object_release(obj);
	kfree(obj->driver_private);
}

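/* Reserve a fake mmap offset for the object in the legacy DRM offset
 * manager and map hash so userspace can mmap() it through the DRM fd. The
 * reservation is sized to cover all of the object's buffers.
 */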
static int
kgsl_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_kgsl_gem_object *priv = obj->driver_private;
	struct drm_map_list *list;
	int msize;

	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (list->map == NULL) {
		DRM_ERROR("Unable to allocate drm_map_list\n");
		return -ENOMEM;
	}

	msize = obj->size * priv->bufcount;

	list->map->type = _DRM_GEM;
	list->map->size = msize;
	list->map->handle = obj;

	/* Allocate a mmap offset */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    msize / PAGE_SIZE,
						    0, 0);

	if (!list->file_offset_node) {
		DRM_ERROR("Failed to allocate offset for %d\n", obj->name);
		kfree(list->map);
		return -ENOMEM;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  msize / PAGE_SIZE, 0);

	if (!list->file_offset_node) {
		DRM_ERROR("Unable to create the file_offset_node\n");
		kfree(list->map);
		return -ENOMEM;
	}

	list->hash.key = list->file_offset_node->start;
	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
		DRM_ERROR("Failed to add to map hash\n");
		drm_mm_put_block(list->file_offset_node);
		kfree(list->map);
		return -ENOMEM;
	}

	priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;

	return 0;
}

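/* Resolve a GEM handle, given a raw DRM file descriptor, to the physical
 * address of its active buffer and the size of the backing allocation so it
 * can be handed to the MDP. Only PMEM-backed objects can be resolved this
 * way.
 */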
int
kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start,
		  unsigned long *len)
{
	struct file *filp;
	struct drm_device *dev;
	struct drm_file *file_priv;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	int ret = 0;

	filp = fget(drm_fd);
	if (unlikely(filp == NULL)) {
		DRM_ERROR("Unable to get the DRM file descriptor\n");
		return -EINVAL;
	}
	file_priv = filp->private_data;
	if (unlikely(file_priv == NULL)) {
		DRM_ERROR("Unable to get the file private data\n");
		fput(filp);
		return -EINVAL;
	}
	dev = file_priv->minor->dev;
	if (unlikely(dev == NULL)) {
		DRM_ERROR("Unable to get the minor device\n");
		fput(filp);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (unlikely(obj == NULL)) {
		DRM_ERROR("Invalid GEM handle %x\n", handle);
		fput(filp);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	/* We can only use the MDP for PMEM regions */

	if (TYPE_IS_PMEM(priv->type)) {
		*start = priv->memdesc.physaddr +
			priv->bufs[priv->active].offset;

		*len = priv->memdesc.size;

		kgsl_gem_mem_flush(&priv->memdesc,
				   priv->type, DRM_KGSL_GEM_CACHE_OP_TO_DEV);
	} else {
		*start = 0;
		*len = 0;
		ret = -EINVAL;
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	fput(filp);
	return ret;
}

static int
kgsl_gem_init_obj(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct drm_gem_object *obj,
		  int *handle)
{
	struct drm_kgsl_gem_object *priv;
	int ret, i;

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	memset(&priv->memdesc, 0, sizeof(priv->memdesc));
	priv->bufcount = 1;
	priv->active = 0;
	priv->bound = 0;

	/* To preserve backwards compatibility, the default memory source
	   is EBI */

	priv->type = DRM_KGSL_GEM_TYPE_PMEM | DRM_KGSL_GEM_PMEM_EBI;

	ret = drm_gem_handle_create(file_priv, obj, handle);

	drm_gem_object_unreference(obj);
	INIT_LIST_HEAD(&priv->wait_list);

	for (i = 0; i < DRM_KGSL_HANDLE_WAIT_ENTRIES; i++) {
		INIT_LIST_HEAD((struct list_head *) &priv->wait_entries[i]);
		priv->wait_entries[i].pid = 0;
		init_waitqueue_head(&priv->wait_entries[i].process_wait_q);
	}

	for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
		INIT_LIST_HEAD((struct list_head *) &priv->fence_entries[i]);
		priv->fence_entries[i].in_use = 0;
		priv->fence_entries[i].gem_obj = obj;
	}

	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
kgsl_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_kgsl_gem_create *create = data;
	struct drm_gem_object *obj;
	int ret, handle;

	/* Page align the size so we can allocate multiple buffers */
	create->size = ALIGN(create->size, 4096);

	obj = drm_gem_object_alloc(dev, create->size);

	if (obj == NULL) {
		DRM_ERROR("Unable to allocate the GEM object\n");
		return -ENOMEM;
	}

	ret = kgsl_gem_init_obj(dev, file_priv, obj, &handle);
	if (ret)
		return ret;

	create->handle = handle;
	return 0;
}

int
kgsl_gem_create_fd_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_kgsl_gem_create_fd *args = data;
	struct file *file;
	dev_t rdev;
	struct fb_info *info;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	int ret, put_needed, handle;

	file = fget_light(args->fd, &put_needed);

	if (file == NULL) {
		DRM_ERROR("Unable to get the file object\n");
		return -EBADF;
	}

	rdev = file->f_dentry->d_inode->i_rdev;

	/* Only framebuffer objects are supported ATM */

	if (MAJOR(rdev) != FB_MAJOR) {
		DRM_ERROR("File descriptor is not a framebuffer\n");
		ret = -EBADF;
		goto error_fput;
	}

	info = registered_fb[MINOR(rdev)];

	if (info == NULL) {
		DRM_ERROR("Framebuffer minor %d is not registered\n",
			  MINOR(rdev));
		ret = -EBADF;
		goto error_fput;
	}

	obj = drm_gem_object_alloc(dev, info->fix.smem_len);

	if (obj == NULL) {
		DRM_ERROR("Unable to allocate GEM object\n");
		ret = -ENOMEM;
		goto error_fput;
	}

	ret = kgsl_gem_init_obj(dev, file_priv, obj, &handle);

	if (ret)
		goto error_fput;

	mutex_lock(&dev->struct_mutex);

	priv = obj->driver_private;
	priv->memdesc.physaddr = info->fix.smem_start;
	priv->type = DRM_KGSL_GEM_TYPE_FD_FBMEM;

	mutex_unlock(&dev->struct_mutex);
	args->handle = handle;

error_fput:
	fput_light(file, put_needed);

	return ret;
}

int
kgsl_gem_setmemtype_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_kgsl_gem_memtype *args = data;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);

	if (obj == NULL) {
		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	if (TYPE_IS_FD(priv->type))
		ret = -EINVAL;
	else {
		if (TYPE_IS_PMEM(args->type) || TYPE_IS_MEM(args->type))
			priv->type = args->type;
		else
			ret = -EINVAL;
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
kgsl_gem_getmemtype_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_kgsl_gem_memtype *args = data;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);

	if (obj == NULL) {
		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	args->type = priv->type;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
kgsl_gem_unbind_gpu_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	return 0;
}

int
kgsl_gem_bind_gpu_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return 0;
}

/* Allocate the memory and prepare it for CPU mapping */

int
kgsl_gem_alloc_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_kgsl_gem_alloc *args = data;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);

	if (obj == NULL) {
		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	ret = kgsl_gem_alloc_memory(obj);

	if (ret) {
		DRM_ERROR("Unable to allocate object memory\n");
	} else if (!priv->mmap_offset) {
		ret = kgsl_gem_create_mmap_offset(obj);
		if (ret)
			DRM_ERROR("Unable to create a mmap offset\n");
	}

	args->offset = priv->mmap_offset;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
kgsl_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_kgsl_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);

	if (obj == NULL) {
		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
		return -EBADF;
	}

	down_write(&current->mm->mmap_sem);

	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);

	up_write(&current->mm->mmap_sem);

	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR((void *) addr))
		return addr;

	args->hostptr = (uint32_t) addr;
	return 0;
}

/* This function is deprecated */

int
kgsl_gem_prep_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_kgsl_gem_prep *args = data;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);

	if (obj == NULL) {
		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	ret = kgsl_gem_alloc_memory(obj);
	if (ret) {
		DRM_ERROR("Unable to allocate object memory\n");
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	if (priv->mmap_offset == 0) {
		ret = kgsl_gem_create_mmap_offset(obj);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	args->offset = priv->mmap_offset;
	args->phys = priv->memdesc.physaddr;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
kgsl_gem_get_bufinfo_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_kgsl_gem_bufinfo *args = data;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	int ret = -EINVAL;
	int index;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);

	if (obj == NULL) {
		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	if (!kgsl_gem_memory_allocated(obj)) {
		DRM_ERROR("Memory not allocated for this object\n");
		goto out;
	}

	for (index = 0; index < priv->bufcount; index++) {
		args->offset[index] = priv->bufs[index].offset;
		args->gpuaddr[index] = priv->bufs[index].gpuaddr;
	}

	args->count = priv->bufcount;
	args->active = priv->active;

	ret = 0;

out:
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

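/* Set the number of sub-buffers for an object. Because all buffers are
 * carved out of one allocation, the count can only be changed before the
 * object's memory has been allocated.
 */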
int
kgsl_gem_set_bufcount_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_kgsl_gem_bufcount *args = data;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	int ret = -EINVAL;

	if (args->bufcount < 1 || args->bufcount > DRM_KGSL_GEM_MAX_BUFFERS)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);

	if (obj == NULL) {
		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	/* It is too much math to worry about what happens if we are already
	   allocated, so just bail if we are */

	if (kgsl_gem_memory_allocated(obj)) {
		DRM_ERROR("Memory already allocated - cannot change "
			  "number of buffers\n");
		goto out;
	}

	priv->bufcount = args->bufcount;
	ret = 0;

out:
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
kgsl_gem_set_active_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_kgsl_gem_active *args = data;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	int ret = -EINVAL;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);

	if (obj == NULL) {
		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	if (args->active < 0 || args->active >= priv->bufcount) {
		DRM_ERROR("Invalid active buffer %d\n", args->active);
		goto out;
	}

	priv->active = args->active;
	ret = 0;

out:
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

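/* Fault handlers for the two mapping flavors: KMEM objects are backed by
 * pages from the memdesc scatterlist, while PMEM/contiguous objects are
 * mapped by physical pfn with vm_insert_pfn().
 */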
900
901int kgsl_gem_kmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
902{
903 struct drm_gem_object *obj = vma->vm_private_data;
904 struct drm_device *dev = obj->dev;
905 struct drm_kgsl_gem_object *priv;
Harsh Vardhan Dwivedi8cb835b2012-03-29 17:23:11 -0600906 unsigned long offset;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700907 struct page *page;
Harsh Vardhan Dwivedi8cb835b2012-03-29 17:23:11 -0600908 int i;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700909
910 mutex_lock(&dev->struct_mutex);
911
912 priv = obj->driver_private;
913
914 offset = (unsigned long) vmf->virtual_address - vma->vm_start;
Harsh Vardhan Dwivedi8cb835b2012-03-29 17:23:11 -0600915 i = offset >> PAGE_SHIFT;
916 page = sg_page(&(priv->memdesc.sg[i]));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700917
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700918 if (!page) {
919 mutex_unlock(&dev->struct_mutex);
920 return VM_FAULT_SIGBUS;
921 }
922
923 get_page(page);
924 vmf->page = page;
925
926 mutex_unlock(&dev->struct_mutex);
927 return 0;
928}
929
930int kgsl_gem_phys_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
931{
932 struct drm_gem_object *obj = vma->vm_private_data;
933 struct drm_device *dev = obj->dev;
934 struct drm_kgsl_gem_object *priv;
935 unsigned long offset, pfn;
936 int ret = 0;
937
938 offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >>
939 PAGE_SHIFT;
940
941 mutex_lock(&dev->struct_mutex);
942
943 priv = obj->driver_private;
944
945 pfn = (priv->memdesc.physaddr >> PAGE_SHIFT) + offset;
946 ret = vm_insert_pfn(vma,
947 (unsigned long) vmf->virtual_address, pfn);
948 mutex_unlock(&dev->struct_mutex);
949
950 switch (ret) {
951 case -ENOMEM:
952 case -EAGAIN:
953 return VM_FAULT_OOM;
954 case -EFAULT:
955 return VM_FAULT_SIGBUS;
956 default:
957 return VM_FAULT_NOPAGE;
958 }
959}
960
961static struct vm_operations_struct kgsl_gem_kmem_vm_ops = {
962 .fault = kgsl_gem_kmem_fault,
963 .open = drm_gem_vm_open,
964 .close = drm_gem_vm_close,
965};
966
967static struct vm_operations_struct kgsl_gem_phys_vm_ops = {
968 .fault = kgsl_gem_phys_fault,
969 .open = drm_gem_vm_open,
970 .close = drm_gem_vm_close,
971};
972
973/* This is a clone of the standard drm_gem_mmap function modified to allow
974 us to properly map KMEM regions as well as the PMEM regions */
975
976int msm_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
977{
978 struct drm_file *priv = filp->private_data;
979 struct drm_device *dev = priv->minor->dev;
980 struct drm_gem_mm *mm = dev->mm_private;
981 struct drm_local_map *map = NULL;
982 struct drm_gem_object *obj;
983 struct drm_hash_item *hash;
984 struct drm_kgsl_gem_object *gpriv;
985 int ret = 0;
986
987 mutex_lock(&dev->struct_mutex);
988
989 if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
990 mutex_unlock(&dev->struct_mutex);
991 return drm_mmap(filp, vma);
992 }
993
994 map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
995 if (!map ||
996 ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
997 ret = -EPERM;
998 goto out_unlock;
999 }
1000
1001 /* Check for valid size. */
1002 if (map->size < vma->vm_end - vma->vm_start) {
1003 ret = -EINVAL;
1004 goto out_unlock;
1005 }
1006
1007 obj = map->handle;
1008
1009 gpriv = obj->driver_private;
1010
1011 /* VM_PFNMAP is only for memory that doesn't use struct page
1012 * in other words, not "normal" memory. If you try to use it
1013 * with "normal" memory then the mappings don't get flushed. */
1014
1015 if (TYPE_IS_MEM(gpriv->type)) {
1016 vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
1017 vma->vm_ops = &kgsl_gem_kmem_vm_ops;
1018 } else {
1019 vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP |
1020 VM_DONTEXPAND;
1021 vma->vm_ops = &kgsl_gem_phys_vm_ops;
1022 }
1023
1024 vma->vm_private_data = map->handle;
1025
1026
1027 /* Take care of requested caching policy */
1028 if (gpriv->type == DRM_KGSL_GEM_TYPE_KMEM ||
1029 gpriv->type & DRM_KGSL_GEM_CACHE_MASK) {
1030 if (gpriv->type & DRM_KGSL_GEM_CACHE_WBACKWA)
1031 vma->vm_page_prot =
1032 pgprot_writebackwacache(vma->vm_page_prot);
1033 else if (gpriv->type & DRM_KGSL_GEM_CACHE_WBACK)
1034 vma->vm_page_prot =
1035 pgprot_writebackcache(vma->vm_page_prot);
1036 else if (gpriv->type & DRM_KGSL_GEM_CACHE_WTHROUGH)
1037 vma->vm_page_prot =
1038 pgprot_writethroughcache(vma->vm_page_prot);
1039 else
1040 vma->vm_page_prot =
1041 pgprot_writecombine(vma->vm_page_prot);
1042 } else {
1043 if (gpriv->type == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE)
1044 vma->vm_page_prot =
1045 pgprot_noncached(vma->vm_page_prot);
1046 else
1047 /* default pmem is WC */
1048 vma->vm_page_prot =
1049 pgprot_writecombine(vma->vm_page_prot);
1050 }
1051
1052 /* flush out existing KMEM cached mappings if new ones are
1053 * of uncached type */
1054 if (IS_MEM_UNCACHED(gpriv->type))
1055 kgsl_cache_range_op(&gpriv->memdesc,
1056 KGSL_CACHE_OP_FLUSH);
1057
1058 /* Add the other memory types here */
1059
1060 /* Take a ref for this mapping of the object, so that the fault
1061 * handler can dereference the mmap offset's pointer to the object.
1062 * This reference is cleaned up by the corresponding vm_close
1063 * (which should happen whether the vma was created by this call, or
1064 * by a vm_open due to mremap or partial unmap or whatever).
1065 */
1066 drm_gem_object_reference(obj);
1067
1068 vma->vm_file = filp; /* Needed for drm_vm_open() */
1069 drm_vm_open_locked(vma);
1070
1071out_unlock:
1072 mutex_unlock(&dev->struct_mutex);
1073
1074 return ret;
1075}
1076
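/* Release every buffer attached to a fence: drop each fence entry, wake the
 * next waiter on the object's wait list (when check_waiting is set), drop
 * the GEM references taken at lock time and mark the fence slot empty.
 */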
void
cleanup_fence(struct drm_kgsl_gem_object_fence *fence, int check_waiting)
{
	int j;
	struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
	struct drm_kgsl_gem_object *unlock_obj;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object_wait_list_entry *lock_next;

	fence->ts_valid = 0;
	fence->timestamp = -1;
	fence->ts_device = -1;

	/* Walk the list of buffers in this fence and clean up the */
	/* references. Note that this can cause memory allocations */
	/* to be freed */
	for (j = fence->num_buffers; j > 0; j--) {
		this_fence_entry =
			(struct drm_kgsl_gem_object_fence_list_entry *)
			fence->buffers_in_fence.prev;

		this_fence_entry->in_use = 0;
		obj = this_fence_entry->gem_obj;
		unlock_obj = obj->driver_private;

		/* Delete it from the list */

		list_del(&this_fence_entry->list);

		/* we are unlocking - see if there are other pids waiting */
		if (check_waiting) {
			if (!list_empty(&unlock_obj->wait_list)) {
				lock_next =
				(struct drm_kgsl_gem_object_wait_list_entry *)
					unlock_obj->wait_list.prev;

				list_del((struct list_head *)&lock_next->list);

				unlock_obj->lockpid = 0;
				wake_up_interruptible(
					&lock_next->process_wait_q);
				lock_next->pid = 0;

			} else {
				/* List is empty so set pid to 0 */
				unlock_obj->lockpid = 0;
			}
		}

		drm_gem_object_unreference(obj);
	}
	/* here all the buffers in the fence are released */
	/* clear the fence entry */
	fence->fence_id = ENTRY_EMPTY;
}

int
find_empty_fence(void)
{
	int i;

	for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
		if (gem_buf_fence[i].fence_id == ENTRY_EMPTY) {
			gem_buf_fence[i].fence_id = fence_id++;
			gem_buf_fence[i].ts_valid = 0;
			INIT_LIST_HEAD(&(gem_buf_fence[i].buffers_in_fence));
			if (fence_id == 0xFFFFFFF0)
				fence_id = 1;
			return i;
		} else {

			/* Look for entries to be cleaned up */
			if (gem_buf_fence[i].fence_id == ENTRY_NEEDS_CLEANUP)
				cleanup_fence(&gem_buf_fence[i], 0);
		}
	}

	return ENTRY_EMPTY;
}

int
find_fence(int index)
{
	int i;

	for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
		if (gem_buf_fence[i].fence_id == index)
			return i;
	}

	return ENTRY_EMPTY;
}

void
wakeup_fence_entries(struct drm_kgsl_gem_object_fence *fence)
{
	struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
	struct drm_kgsl_gem_object_wait_list_entry *lock_next;
	struct drm_kgsl_gem_object *unlock_obj;
	struct drm_gem_object *obj;

	/* TS has expired when we get here */
	fence->ts_valid = 0;
	fence->timestamp = -1;
	fence->ts_device = -1;

	list_for_each_entry(this_fence_entry, &fence->buffers_in_fence, list) {
		obj = this_fence_entry->gem_obj;
		unlock_obj = obj->driver_private;

		if (!list_empty(&unlock_obj->wait_list)) {
			lock_next =
				(struct drm_kgsl_gem_object_wait_list_entry *)
				unlock_obj->wait_list.prev;

			/* Unblock the pid */
			lock_next->pid = 0;

			/* Delete it from the list */
			list_del((struct list_head *)&lock_next->list);

			unlock_obj->lockpid = 0;
			wake_up_interruptible(&lock_next->process_wait_q);

		} else {
			/* List is empty so set pid to 0 */
			unlock_obj->lockpid = 0;
		}
	}
	fence->fence_id = ENTRY_NEEDS_CLEANUP;  /* Mark it as needing cleanup */
}

int
kgsl_gem_lock_handle_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	/* The purpose of this function is to lock a given set of handles. */
	/* The driver will maintain a list of locked handles. */
	/* If a request comes in for a handle that's locked the thread will */
	/* block until it's no longer in use. */

	struct drm_kgsl_gem_lock_handles *args = data;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
	struct drm_kgsl_gem_object_fence *fence;
	struct drm_kgsl_gem_object_wait_list_entry *lock_item;
	int i, j;
	int result = 0;
	uint32_t *lock_list;
	uint32_t *work_list = NULL;
	int32_t fence_index;

	/* copy in the data from user space */
	lock_list = kzalloc(sizeof(uint32_t) * args->num_handles, GFP_KERNEL);
	if (!lock_list) {
		DRM_ERROR("Unable to allocate memory for lock list\n");
		result = -ENOMEM;
		goto error;
	}

	if (copy_from_user(lock_list, args->handle_list,
			   sizeof(uint32_t) * args->num_handles)) {
		DRM_ERROR("Unable to copy the lock list from the user\n");
		result = -EFAULT;
		goto free_handle_list;
	}


	work_list = lock_list;
	mutex_lock(&dev->struct_mutex);

	/* build the fence for this group of handles */
	fence_index = find_empty_fence();
	if (fence_index == ENTRY_EMPTY) {
		DRM_ERROR("Unable to find an empty fence\n");
		args->lock_id = 0xDEADBEEF;
		result = -EFAULT;
		goto out_unlock;
	}

	fence = &gem_buf_fence[fence_index];
	gem_buf_fence[fence_index].num_buffers = args->num_handles;
	args->lock_id = gem_buf_fence[fence_index].fence_id;

	for (j = args->num_handles; j > 0; j--, lock_list++) {
		obj = drm_gem_object_lookup(dev, file_priv, *lock_list);

		if (obj == NULL) {
			DRM_ERROR("Invalid GEM handle %x\n", *lock_list);
			result = -EBADF;
			goto out_unlock;
		}

		priv = obj->driver_private;
		this_fence_entry = NULL;

		/* get a fence entry to hook into the fence */
		for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
			if (!priv->fence_entries[i].in_use) {
				this_fence_entry = &priv->fence_entries[i];
				this_fence_entry->in_use = 1;
				break;
			}
		}

		if (this_fence_entry == NULL) {
			fence->num_buffers = 0;
			fence->fence_id = ENTRY_EMPTY;
			args->lock_id = 0xDEADBEAD;
			result = -EFAULT;
			drm_gem_object_unreference(obj);
			goto out_unlock;
		}

		/* We're trying to lock - add to a fence */
		list_add((struct list_head *)this_fence_entry,
			 &gem_buf_fence[fence_index].buffers_in_fence);
		if (priv->lockpid) {

			if (priv->lockpid == args->pid) {
				/* now that things are running async this */
				/* happens when an op isn't done */
				/* so it's already locked by the calling pid */
				continue;
			}


			/* if a pid already had it locked */
			/* create and add to wait list */
			for (i = 0; i < DRM_KGSL_HANDLE_WAIT_ENTRIES; i++) {
				if (priv->wait_entries[i].in_use == 0) {
					/* this one is empty */
					lock_item = &priv->wait_entries[i];
					lock_item->in_use = 1;
					lock_item->pid = args->pid;
					INIT_LIST_HEAD((struct list_head *)
						&priv->wait_entries[i]);
					break;
				}
			}

			if (i == DRM_KGSL_HANDLE_WAIT_ENTRIES) {

				result = -EFAULT;
				drm_gem_object_unreference(obj);
				goto out_unlock;
			}

			list_add_tail((struct list_head *)&lock_item->list,
				      &priv->wait_list);
			mutex_unlock(&dev->struct_mutex);
			/* here we need to block */
			wait_event_interruptible_timeout(
				priv->wait_entries[i].process_wait_q,
				(priv->lockpid == 0),
				msecs_to_jiffies(64));
			mutex_lock(&dev->struct_mutex);
			lock_item->in_use = 0;
		}

		/* Getting here means no one currently holds the lock */
		priv->lockpid = args->pid;

		args->lock_id = gem_buf_fence[fence_index].fence_id;
	}
	fence->lockpid = args->pid;

out_unlock:
	mutex_unlock(&dev->struct_mutex);

free_handle_list:
	kfree(work_list);

error:
	return result;
}

int
kgsl_gem_unlock_handle_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_kgsl_gem_unlock_handles *args = data;
	int result = 0;
	int32_t fence_index;

	mutex_lock(&dev->struct_mutex);
	fence_index = find_fence(args->lock_id);
	if (fence_index == ENTRY_EMPTY) {
		DRM_ERROR("Invalid lock ID: %x\n", args->lock_id);
		result = -EFAULT;
		goto out_unlock;
	}

	cleanup_fence(&gem_buf_fence[fence_index], 1);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return result;
}


int
kgsl_gem_unlock_on_ts_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_kgsl_gem_unlock_on_ts *args = data;
	int result = 0;
	int ts_done = 0;
	int32_t fence_index, ts_device;
	struct drm_kgsl_gem_object_fence *fence;
	struct kgsl_device *device;

	if (args->type == DRM_KGSL_GEM_TS_3D)
		ts_device = KGSL_DEVICE_3D0;
	else if (args->type == DRM_KGSL_GEM_TS_2D)
		ts_device = KGSL_DEVICE_2D0;
	else {
		result = -EINVAL;
		goto error;
	}

	device = kgsl_get_device(ts_device);
	ts_done = kgsl_check_timestamp(device, NULL, args->timestamp);

	mutex_lock(&dev->struct_mutex);

	fence_index = find_fence(args->lock_id);
	if (fence_index == ENTRY_EMPTY) {
		DRM_ERROR("Invalid lock ID: %x\n", args->lock_id);
		result = -EFAULT;
		goto out_unlock;
	}

	fence = &gem_buf_fence[fence_index];
	fence->ts_device = ts_device;

	if (!ts_done)
		fence->ts_valid = 1;
	else
		cleanup_fence(fence, 1);


out_unlock:
	mutex_unlock(&dev->struct_mutex);

error:
	return result;
}

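/* Table mapping the DRM_KGSL_GEM_* ioctls onto their handlers; only the
 * framebuffer-backed create is restricted to the DRM master.
 */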
struct drm_ioctl_desc kgsl_drm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(KGSL_GEM_CREATE, kgsl_gem_create_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_PREP, kgsl_gem_prep_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_SETMEMTYPE, kgsl_gem_setmemtype_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_GETMEMTYPE, kgsl_gem_getmemtype_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_BIND_GPU, kgsl_gem_bind_gpu_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_UNBIND_GPU, kgsl_gem_unbind_gpu_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_ALLOC, kgsl_gem_alloc_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_MMAP, kgsl_gem_mmap_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_GET_BUFINFO, kgsl_gem_get_bufinfo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_SET_BUFCOUNT,
			  kgsl_gem_set_bufcount_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_SET_ACTIVE, kgsl_gem_set_active_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_LOCK_HANDLE,
			  kgsl_gem_lock_handle_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_UNLOCK_HANDLE,
			  kgsl_gem_unlock_handle_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_UNLOCK_ON_TS,
			  kgsl_gem_unlock_on_ts_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_CREATE_FD, kgsl_gem_create_fd_ioctl,
			  DRM_MASTER),
};

static struct drm_driver driver = {
	.driver_features = DRIVER_GEM,
	.load = kgsl_drm_load,
	.unload = kgsl_drm_unload,
	.preclose = kgsl_drm_preclose,
	.suspend = kgsl_drm_suspend,
	.resume = kgsl_drm_resume,
	.reclaim_buffers = drm_core_reclaim_buffers,
	.gem_init_object = kgsl_gem_init_object,
	.gem_free_object = kgsl_gem_free_object,
	.ioctls = kgsl_drm_ioctls,

	.fops = {
		.owner = THIS_MODULE,
		.open = drm_open,
		.release = drm_release,
		.unlocked_ioctl = drm_ioctl,
		.mmap = msm_drm_gem_mmap,
		.poll = drm_poll,
		.fasync = drm_fasync,
	},

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

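/* Module-level init/exit: register the DRM platform driver once (resetting
 * the global fence table and the MMU-mapped memory list) and unregister it
 * again on exit.
 */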
int kgsl_drm_init(struct platform_device *dev)
{
	int i;

	/* Only initialize once */
	if (kgsl_drm_inited == DRM_KGSL_INITED)
		return 0;

	kgsl_drm_inited = DRM_KGSL_INITED;

	driver.num_ioctls = DRM_ARRAY_SIZE(kgsl_drm_ioctls);

	INIT_LIST_HEAD(&kgsl_mem_list);

	for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
		gem_buf_fence[i].num_buffers = 0;
		gem_buf_fence[i].ts_valid = 0;
		gem_buf_fence[i].fence_id = ENTRY_EMPTY;
	}

	return drm_platform_init(&driver, dev);
}

void kgsl_drm_exit(void)
{
	kgsl_drm_inited = DRM_KGSL_NOT_INITED;
	drm_platform_exit(&driver, driver.kdriver.platform_device);
}