/* Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/* Implements an interface between KGSL and the DRM subsystem. For now this
 * is pretty simple, but it will take on more of the workload as time goes
 * on
 */
#include "drmP.h"
#include "drm.h"
#include <linux/android_pmem.h>

#include "kgsl.h"
#include "kgsl_device.h"
#include "kgsl_drm.h"
#include "kgsl_mmu.h"
#include "kgsl_sharedmem.h"

#define DRIVER_AUTHOR		"Qualcomm"
#define DRIVER_NAME		"kgsl"
#define DRIVER_DESC		"KGSL DRM"
#define DRIVER_DATE		"20100127"

#define DRIVER_MAJOR		2
#define DRIVER_MINOR		1
#define DRIVER_PATCHLEVEL	1

#define DRM_KGSL_GEM_FLAG_MAPPED (1 << 0)

#define ENTRY_EMPTY -1
#define ENTRY_NEEDS_CLEANUP -2

#define DRM_KGSL_NOT_INITED -1
#define DRM_KGSL_INITED 1

#define DRM_KGSL_NUM_FENCE_ENTRIES (DRM_KGSL_HANDLE_WAIT_ENTRIES << 2)
#define DRM_KGSL_HANDLE_WAIT_ENTRIES 5

/* Returns true if the memory type is in PMEM */

#ifdef CONFIG_KERNEL_PMEM_SMI_REGION
#define TYPE_IS_PMEM(_t) \
	(((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_EBI) || \
	 ((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_SMI) || \
	 ((_t) & DRM_KGSL_GEM_TYPE_PMEM))
#else
#define TYPE_IS_PMEM(_t) \
	(((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_EBI) || \
	 ((_t) & (DRM_KGSL_GEM_TYPE_PMEM | DRM_KGSL_GEM_PMEM_EBI)))
#endif

/* Returns true if the memory type is regular */

#define TYPE_IS_MEM(_t) \
	(((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_KMEM) || \
	 ((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE) || \
	 ((_t) & DRM_KGSL_GEM_TYPE_MEM))

#define TYPE_IS_FD(_t) ((_t) & DRM_KGSL_GEM_TYPE_FD_MASK)

/* Returns true if KMEM region is uncached */

#define IS_MEM_UNCACHED(_t) \
	((_t == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE) || \
	 (_t == DRM_KGSL_GEM_TYPE_KMEM) || \
	 (TYPE_IS_MEM(_t) && (_t & DRM_KGSL_GEM_CACHE_WCOMBINE)))

struct drm_kgsl_gem_object_wait_list_entry {
	struct list_head list;
	int pid;
	int in_use;
	wait_queue_head_t process_wait_q;
};

struct drm_kgsl_gem_object_fence {
	int32_t fence_id;
	unsigned int num_buffers;
	int ts_valid;
	unsigned int timestamp;
	int ts_device;
	int lockpid;
	struct list_head buffers_in_fence;
};

struct drm_kgsl_gem_object_fence_list_entry {
	struct list_head list;
	int in_use;
	struct drm_gem_object *gem_obj;
};

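/* Fence bookkeeping: each lock request groups a set of buffers into one
 * entry of the global gem_buf_fence[] table below.  A fence records the
 * owning pid and, optionally, a device timestamp that allows the buffers
 * to be released once the timestamp expires.
 */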
static int32_t fence_id = 0x1;

static struct drm_kgsl_gem_object_fence
	gem_buf_fence[DRM_KGSL_NUM_FENCE_ENTRIES];

struct drm_kgsl_gem_object {
	struct drm_gem_object *obj;
	uint32_t type;
	struct kgsl_memdesc memdesc;
	struct kgsl_pagetable *pagetable;
	uint64_t mmap_offset;
	int bufcount;
	int flags;
	struct list_head list;
	int active;

	struct {
		uint32_t offset;
		uint32_t gpuaddr;
	} bufs[DRM_KGSL_GEM_MAX_BUFFERS];

	int bound;
	int lockpid;
	/* Put these here to avoid allocating all the time */
	struct drm_kgsl_gem_object_wait_list_entry
		wait_entries[DRM_KGSL_HANDLE_WAIT_ENTRIES];
	/* Each object can only appear in a single fence */
	struct drm_kgsl_gem_object_fence_list_entry
		fence_entries[DRM_KGSL_NUM_FENCE_ENTRIES];

	struct list_head wait_list;
};

static int kgsl_drm_inited = DRM_KGSL_NOT_INITED;

/* This is a global list of all the memory currently mapped in the MMU */
static struct list_head kgsl_mem_list;

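/* Pick the cache operation that matches the buffer's cache mode: clean
 * (write back) dirty CPU lines before the buffer is handed to the device,
 * and invalidate stale lines before the CPU reads data written by the
 * device.
 */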
static void kgsl_gem_mem_flush(struct kgsl_memdesc *memdesc, int type, int op)
{
	int cacheop = 0;

	switch (op) {
	case DRM_KGSL_GEM_CACHE_OP_TO_DEV:
		if (type & (DRM_KGSL_GEM_CACHE_WBACK |
			    DRM_KGSL_GEM_CACHE_WBACKWA))
			cacheop = KGSL_CACHE_OP_CLEAN;

		break;

	case DRM_KGSL_GEM_CACHE_OP_FROM_DEV:
		if (type & (DRM_KGSL_GEM_CACHE_WBACK |
			    DRM_KGSL_GEM_CACHE_WBACKWA |
			    DRM_KGSL_GEM_CACHE_WTHROUGH))
			cacheop = KGSL_CACHE_OP_INV;
	}

	kgsl_cache_range_op(memdesc, cacheop);
}

/* TODO:
 * Add vsync wait */

static int kgsl_drm_load(struct drm_device *dev, unsigned long flags)
{
	return 0;
}

static int kgsl_drm_unload(struct drm_device *dev)
{
	return 0;
}

struct kgsl_drm_device_priv {
	struct kgsl_device *device[KGSL_DEVICE_MAX];
	struct kgsl_device_private *devpriv[KGSL_DEVICE_MAX];
};

void kgsl_drm_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
}

static int kgsl_drm_suspend(struct drm_device *dev, pm_message_t state)
{
	return 0;
}

static int kgsl_drm_resume(struct drm_device *dev)
{
	return 0;
}

static void
kgsl_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_kgsl_gem_object *priv = obj->driver_private;
	struct drm_map_list *list;

	list = &obj->map_list;
	drm_ht_remove_item(&mm->offset_hash, &list->hash);
	if (list->file_offset_node) {
		drm_mm_put_block(list->file_offset_node);
		list->file_offset_node = NULL;
	}

	kfree(list->map);
	list->map = NULL;

	priv->mmap_offset = 0;
}

static int
kgsl_gem_memory_allocated(struct drm_gem_object *obj)
{
	struct drm_kgsl_gem_object *priv = obj->driver_private;
	return priv->memdesc.size ? 1 : 0;
}

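/* Back the object with real memory.  The global GPU pagetable is looked up
 * lazily on first allocation; PMEM/EBI types are allocated from contiguous
 * EBI memory, while regular KMEM types come from the page allocator and are
 * tracked on kgsl_mem_list.  A single allocation of size * bufcount covers
 * every buffer, and per-buffer offsets/GPU addresses are derived from it.
 */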
static int
kgsl_gem_alloc_memory(struct drm_gem_object *obj)
{
	struct drm_kgsl_gem_object *priv = obj->driver_private;
	int index;
	int result = 0;

	/* Return if the memory is already allocated */

	if (kgsl_gem_memory_allocated(obj) || TYPE_IS_FD(priv->type))
		return 0;

	if (priv->pagetable == NULL) {
		priv->pagetable = kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);

		if (priv->pagetable == NULL) {
			DRM_ERROR("Unable to get the GPU MMU pagetable\n");
			return -EINVAL;
		}
	}

	if (TYPE_IS_PMEM(priv->type)) {
		if (priv->type == DRM_KGSL_GEM_TYPE_EBI ||
		    priv->type & DRM_KGSL_GEM_PMEM_EBI) {
			result = kgsl_sharedmem_ebimem_user(
					&priv->memdesc,
					priv->pagetable,
					obj->size * priv->bufcount,
					0);
			if (result) {
				DRM_ERROR(
				"Unable to allocate PMEM memory\n");
				return result;
			}
		} else
			return -EINVAL;

	} else if (TYPE_IS_MEM(priv->type)) {

		if (priv->type == DRM_KGSL_GEM_TYPE_KMEM ||
		    priv->type & DRM_KGSL_GEM_CACHE_MASK)
			list_add(&priv->list, &kgsl_mem_list);

		result = kgsl_sharedmem_page_alloc_user(&priv->memdesc,
					priv->pagetable,
					obj->size * priv->bufcount, 0);

		if (result != 0) {
			DRM_ERROR(
			"Unable to allocate Vmalloc user memory\n");
			return result;
		}
	} else
		return -EINVAL;

	for (index = 0; index < priv->bufcount; index++) {
		priv->bufs[index].offset = index * obj->size;
		priv->bufs[index].gpuaddr =
			priv->memdesc.gpuaddr +
			priv->bufs[index].offset;
	}
	priv->flags |= DRM_KGSL_GEM_FLAG_MAPPED;

	return 0;
}

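/* Undo kgsl_gem_alloc_memory(): invalidate any CPU-side cached lines,
 * release the shared memory and the pagetable reference, and drop the
 * object from the global KMEM list if it was placed there.
 */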
static void
kgsl_gem_free_memory(struct drm_gem_object *obj)
{
	struct drm_kgsl_gem_object *priv = obj->driver_private;

	if (!kgsl_gem_memory_allocated(obj) || TYPE_IS_FD(priv->type))
		return;

	kgsl_gem_mem_flush(&priv->memdesc, priv->type,
			   DRM_KGSL_GEM_CACHE_OP_FROM_DEV);

	kgsl_sharedmem_free(&priv->memdesc);

	kgsl_mmu_putpagetable(priv->pagetable);
	priv->pagetable = NULL;

	if ((priv->type == DRM_KGSL_GEM_TYPE_KMEM) ||
	    (priv->type & DRM_KGSL_GEM_CACHE_MASK))
		list_del(&priv->list);

	priv->flags &= ~DRM_KGSL_GEM_FLAG_MAPPED;
}

int
kgsl_gem_init_object(struct drm_gem_object *obj)
{
	struct drm_kgsl_gem_object *priv;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv == NULL) {
		DRM_ERROR("Unable to create GEM object\n");
		return -ENOMEM;
	}

	obj->driver_private = priv;
	priv->obj = obj;

	return 0;
}

void
kgsl_gem_free_object(struct drm_gem_object *obj)
{
	kgsl_gem_free_memory(obj);
	kgsl_gem_free_mmap_offset(obj);
	drm_gem_object_release(obj);
	kfree(obj->driver_private);
}

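/* Reserve a fake mmap offset for the object from the DRM offset manager and
 * add it to the offset hash, so a later mmap() at this offset can be routed
 * back to the object by msm_drm_gem_mmap().
 */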
static int
kgsl_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_kgsl_gem_object *priv = obj->driver_private;
	struct drm_map_list *list;
	int msize;

	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (list->map == NULL) {
		DRM_ERROR("Unable to allocate drm_map_list\n");
		return -ENOMEM;
	}

	msize = obj->size * priv->bufcount;

	list->map->type = _DRM_GEM;
	list->map->size = msize;
	list->map->handle = obj;

	/* Allocate a mmap offset */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    msize / PAGE_SIZE,
						    0, 0);

	if (!list->file_offset_node) {
		DRM_ERROR("Failed to allocate offset for %d\n", obj->name);
		kfree(list->map);
		return -ENOMEM;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  msize / PAGE_SIZE, 0);

	if (!list->file_offset_node) {
		DRM_ERROR("Unable to create the file_offset_node\n");
		kfree(list->map);
		return -ENOMEM;
	}

	list->hash.key = list->file_offset_node->start;
	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
		DRM_ERROR("Failed to add to map hash\n");
		drm_mm_put_block(list->file_offset_node);
		kfree(list->map);
		return -ENOMEM;
	}

	priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;

	return 0;
}

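/* Resolve a DRM fd/GEM handle pair to the physical start address of the
 * active buffer and the size of the allocation, for use by other kernel
 * code such as the MDP display driver.  Only valid for physically
 * contiguous PMEM-backed objects.
 */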
int
kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start,
		  unsigned long *len)
{
	struct file *filp;
	struct drm_device *dev;
	struct drm_file *file_priv;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	int ret = 0;

	filp = fget(drm_fd);
	if (unlikely(filp == NULL)) {
		DRM_ERROR("Unable to get the DRM file descriptor\n");
		return -EINVAL;
	}
	file_priv = filp->private_data;
	if (unlikely(file_priv == NULL)) {
		DRM_ERROR("Unable to get the file private data\n");
		fput(filp);
		return -EINVAL;
	}
	dev = file_priv->minor->dev;
	if (unlikely(dev == NULL)) {
		DRM_ERROR("Unable to get the minor device\n");
		fput(filp);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (unlikely(obj == NULL)) {
		DRM_ERROR("Invalid GEM handle %x\n", handle);
		fput(filp);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	/* We can only use the MDP for PMEM regions */

	if (TYPE_IS_PMEM(priv->type)) {
		*start = priv->memdesc.physaddr +
			priv->bufs[priv->active].offset;

		*len = priv->memdesc.size;

		kgsl_gem_mem_flush(&priv->memdesc,
				   priv->type, DRM_KGSL_GEM_CACHE_OP_TO_DEV);
	} else {
		*start = 0;
		*len = 0;
		ret = -EINVAL;
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	fput(filp);
	return ret;
}

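/* Common initialization for newly created GEM objects: set up the default
 * (PMEM/EBI) memory type and the per-object wait and fence entries, then
 * create the userspace handle.  The handle now owns the object, so the
 * creation reference is dropped here.
 */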
static int
kgsl_gem_init_obj(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct drm_gem_object *obj,
		  int *handle)
{
	struct drm_kgsl_gem_object *priv;
	int ret, i;

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	memset(&priv->memdesc, 0, sizeof(priv->memdesc));
	priv->bufcount = 1;
	priv->active = 0;
	priv->bound = 0;

	/* To preserve backwards compatibility, the default memory source
	   is EBI */

	priv->type = DRM_KGSL_GEM_TYPE_PMEM | DRM_KGSL_GEM_PMEM_EBI;

	ret = drm_gem_handle_create(file_priv, obj, handle);

	drm_gem_object_unreference(obj);
	INIT_LIST_HEAD(&priv->wait_list);

	for (i = 0; i < DRM_KGSL_HANDLE_WAIT_ENTRIES; i++) {
		INIT_LIST_HEAD((struct list_head *) &priv->wait_entries[i]);
		priv->wait_entries[i].pid = 0;
		init_waitqueue_head(&priv->wait_entries[i].process_wait_q);
	}

	for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
		INIT_LIST_HEAD((struct list_head *) &priv->fence_entries[i]);
		priv->fence_entries[i].in_use = 0;
		priv->fence_entries[i].gem_obj = obj;
	}

	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
kgsl_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_kgsl_gem_create *create = data;
	struct drm_gem_object *obj;
	int ret, handle;

	/* Page align the size so we can allocate multiple buffers */
	create->size = ALIGN(create->size, 4096);

	obj = drm_gem_object_alloc(dev, create->size);

	if (obj == NULL) {
		DRM_ERROR("Unable to allocate the GEM object\n");
		return -ENOMEM;
	}

	ret = kgsl_gem_init_obj(dev, file_priv, obj, &handle);
	if (ret)
		return ret;

	create->handle = handle;
	return 0;
}

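/* Wrap an existing framebuffer device fd in a GEM object.  The object is
 * not backed by new memory; it records the framebuffer's physical start
 * address so the framebuffer can be shared through the GEM interfaces.
 */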
int
kgsl_gem_create_fd_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_kgsl_gem_create_fd *args = data;
	struct file *file;
	dev_t rdev;
	struct fb_info *info;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	int ret, put_needed, handle;

	file = fget_light(args->fd, &put_needed);

	if (file == NULL) {
		DRM_ERROR("Unable to get the file object\n");
		return -EBADF;
	}

	rdev = file->f_dentry->d_inode->i_rdev;

	/* Only framebuffer objects are supported ATM */

	if (MAJOR(rdev) != FB_MAJOR) {
		DRM_ERROR("File descriptor is not a framebuffer\n");
		ret = -EBADF;
		goto error_fput;
	}

	info = registered_fb[MINOR(rdev)];

	if (info == NULL) {
		DRM_ERROR("Framebuffer minor %d is not registered\n",
			  MINOR(rdev));
		ret = -EBADF;
		goto error_fput;
	}

	obj = drm_gem_object_alloc(dev, info->fix.smem_len);

	if (obj == NULL) {
		DRM_ERROR("Unable to allocate GEM object\n");
		ret = -ENOMEM;
		goto error_fput;
	}

	ret = kgsl_gem_init_obj(dev, file_priv, obj, &handle);

	if (ret)
		goto error_fput;

	mutex_lock(&dev->struct_mutex);

	priv = obj->driver_private;
	priv->memdesc.physaddr = info->fix.smem_start;
	priv->type = DRM_KGSL_GEM_TYPE_FD_FBMEM;

	mutex_unlock(&dev->struct_mutex);
	args->handle = handle;

error_fput:
	fput_light(file, put_needed);

	return ret;
}

int
kgsl_gem_setmemtype_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_kgsl_gem_memtype *args = data;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);

	if (obj == NULL) {
		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	if (TYPE_IS_FD(priv->type))
		ret = -EINVAL;
	else {
		if (TYPE_IS_PMEM(args->type) || TYPE_IS_MEM(args->type))
			priv->type = args->type;
		else
			ret = -EINVAL;
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
kgsl_gem_getmemtype_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_kgsl_gem_memtype *args = data;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);

	if (obj == NULL) {
		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	args->type = priv->type;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
kgsl_gem_unbind_gpu_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	return 0;
}

int
kgsl_gem_bind_gpu_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return 0;
}

/* Allocate the memory and prepare it for CPU mapping */

int
kgsl_gem_alloc_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_kgsl_gem_alloc *args = data;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);

	if (obj == NULL) {
		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	ret = kgsl_gem_alloc_memory(obj);

	if (ret) {
		DRM_ERROR("Unable to allocate object memory\n");
	} else if (!priv->mmap_offset) {
		ret = kgsl_gem_create_mmap_offset(obj);
		if (ret)
			DRM_ERROR("Unable to create a mmap offset\n");
	}

	args->offset = priv->mmap_offset;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

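/* Map the object into the calling process's address space using the mmap
 * offset set up at allocation time, and hand the resulting user address
 * back in args->hostptr.
 */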
int
kgsl_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_kgsl_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);

	if (obj == NULL) {
		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
		return -EBADF;
	}

	down_write(&current->mm->mmap_sem);

	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);

	up_write(&current->mm->mmap_sem);

	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR((void *) addr))
		return addr;

	args->hostptr = (uint32_t) addr;
	return 0;
}

/* This function is deprecated */

int
kgsl_gem_prep_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_kgsl_gem_prep *args = data;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);

	if (obj == NULL) {
		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	ret = kgsl_gem_alloc_memory(obj);
	if (ret) {
		DRM_ERROR("Unable to allocate object memory\n");
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	if (priv->mmap_offset == 0) {
		ret = kgsl_gem_create_mmap_offset(obj);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	args->offset = priv->mmap_offset;
	args->phys = priv->memdesc.physaddr;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
kgsl_gem_get_bufinfo_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_kgsl_gem_bufinfo *args = data;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	int ret = -EINVAL;
	int index;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);

	if (obj == NULL) {
		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	if (!kgsl_gem_memory_allocated(obj)) {
		DRM_ERROR("Memory not allocated for this object\n");
		goto out;
	}

	for (index = 0; index < priv->bufcount; index++) {
		args->offset[index] = priv->bufs[index].offset;
		args->gpuaddr[index] = priv->bufs[index].gpuaddr;
	}

	args->count = priv->bufcount;
	args->active = priv->active;

	ret = 0;

out:
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
kgsl_gem_set_bufcount_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_kgsl_gem_bufcount *args = data;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	int ret = -EINVAL;

	if (args->bufcount < 1 || args->bufcount > DRM_KGSL_GEM_MAX_BUFFERS)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);

	if (obj == NULL) {
		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	/* It is too much math to worry about what happens if we are already
	   allocated, so just bail if we are */

	if (kgsl_gem_memory_allocated(obj)) {
		DRM_ERROR("Memory already allocated - cannot change "
			  "number of buffers\n");
		goto out;
	}

	priv->bufcount = args->bufcount;
	ret = 0;

out:
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
kgsl_gem_set_active_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_kgsl_gem_active *args = data;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	int ret = -EINVAL;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);

	if (obj == NULL) {
		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	if (args->active < 0 || args->active >= priv->bufcount) {
		DRM_ERROR("Invalid active buffer %d\n", args->active);
		goto out;
	}

	priv->active = args->active;
	ret = 0;

out:
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int kgsl_gem_kmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct drm_kgsl_gem_object *priv;
	unsigned long offset;
	struct page *page;
	int i;

	mutex_lock(&dev->struct_mutex);

	priv = obj->driver_private;

	offset = (unsigned long) vmf->virtual_address - vma->vm_start;
	i = offset >> PAGE_SHIFT;
	page = sg_page(&(priv->memdesc.sg[i]));

	if (!page) {
		mutex_unlock(&dev->struct_mutex);
		return VM_FAULT_SIGBUS;
	}

	get_page(page);
	vmf->page = page;

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

int kgsl_gem_phys_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct drm_kgsl_gem_object *priv;
	unsigned long offset, pfn;
	int ret = 0;

	offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	mutex_lock(&dev->struct_mutex);

	priv = obj->driver_private;

	pfn = (priv->memdesc.physaddr >> PAGE_SHIFT) + offset;
	ret = vm_insert_pfn(vma,
			    (unsigned long) vmf->virtual_address, pfn);
	mutex_unlock(&dev->struct_mutex);

	switch (ret) {
	case -ENOMEM:
	case -EAGAIN:
		return VM_FAULT_OOM;
	case -EFAULT:
		return VM_FAULT_SIGBUS;
	default:
		return VM_FAULT_NOPAGE;
	}
}

static struct vm_operations_struct kgsl_gem_kmem_vm_ops = {
	.fault = kgsl_gem_kmem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static struct vm_operations_struct kgsl_gem_phys_vm_ops = {
	.fault = kgsl_gem_phys_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

/* This is a clone of the standard drm_gem_mmap function modified to allow
   us to properly map KMEM regions as well as the PMEM regions */

int msm_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	struct drm_kgsl_gem_object *gpriv;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		ret = -EPERM;
		goto out_unlock;
	}

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start) {
		ret = -EINVAL;
		goto out_unlock;
	}

	obj = map->handle;

	gpriv = obj->driver_private;

	/* VM_PFNMAP is only for memory that doesn't use struct page
	 * in other words, not "normal" memory. If you try to use it
	 * with "normal" memory then the mappings don't get flushed. */

	if (TYPE_IS_MEM(gpriv->type)) {
		vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
		vma->vm_ops = &kgsl_gem_kmem_vm_ops;
	} else {
		vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP |
			VM_DONTEXPAND;
		vma->vm_ops = &kgsl_gem_phys_vm_ops;
	}

	vma->vm_private_data = map->handle;

	/* Take care of requested caching policy */
	if (gpriv->type == DRM_KGSL_GEM_TYPE_KMEM ||
	    gpriv->type & DRM_KGSL_GEM_CACHE_MASK) {
		if (gpriv->type & DRM_KGSL_GEM_CACHE_WBACKWA)
			vma->vm_page_prot =
				pgprot_writebackwacache(vma->vm_page_prot);
		else if (gpriv->type & DRM_KGSL_GEM_CACHE_WBACK)
			vma->vm_page_prot =
				pgprot_writebackcache(vma->vm_page_prot);
		else if (gpriv->type & DRM_KGSL_GEM_CACHE_WTHROUGH)
			vma->vm_page_prot =
				pgprot_writethroughcache(vma->vm_page_prot);
		else
			vma->vm_page_prot =
				pgprot_writecombine(vma->vm_page_prot);
	} else {
		if (gpriv->type == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE)
			vma->vm_page_prot =
				pgprot_noncached(vma->vm_page_prot);
		else
			/* default pmem is WC */
			vma->vm_page_prot =
				pgprot_writecombine(vma->vm_page_prot);
	}

	/* flush out existing KMEM cached mappings if new ones are
	 * of uncached type */
	if (IS_MEM_UNCACHED(gpriv->type))
		kgsl_cache_range_op(&gpriv->memdesc,
				    KGSL_CACHE_OP_FLUSH);

	/* Add the other memory types here */

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

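/* Release every buffer attached to a fence: clear its fence entry, drop the
 * GEM reference taken at lock time and, if requested, hand the lock to the
 * next waiting pid (or clear lockpid when nobody is waiting).  Finally mark
 * the fence slot empty so it can be reused.
 */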
void
cleanup_fence(struct drm_kgsl_gem_object_fence *fence, int check_waiting)
{
	int j;
	struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
	struct drm_kgsl_gem_object *unlock_obj;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object_wait_list_entry *lock_next;

	fence->ts_valid = 0;
	fence->timestamp = -1;
	fence->ts_device = -1;

	/* Walk the list of buffers in this fence and clean up the */
	/* references. Note that this can cause memory allocations */
	/* to be freed */
	for (j = fence->num_buffers; j > 0; j--) {
		this_fence_entry =
			(struct drm_kgsl_gem_object_fence_list_entry *)
			fence->buffers_in_fence.prev;

		this_fence_entry->in_use = 0;
		obj = this_fence_entry->gem_obj;
		unlock_obj = obj->driver_private;

		/* Delete it from the list */

		list_del(&this_fence_entry->list);

		/* we are unlocking - see if there are other pids waiting */
		if (check_waiting) {
			if (!list_empty(&unlock_obj->wait_list)) {
				lock_next =
				(struct drm_kgsl_gem_object_wait_list_entry *)
					unlock_obj->wait_list.prev;

				list_del((struct list_head *)&lock_next->list);

				unlock_obj->lockpid = 0;
				wake_up_interruptible(
					&lock_next->process_wait_q);
				lock_next->pid = 0;

			} else {
				/* List is empty so set pid to 0 */
				unlock_obj->lockpid = 0;
			}
		}

		drm_gem_object_unreference(obj);
	}
	/* here all the buffers in the fence are released */
	/* clear the fence entry */
	fence->fence_id = ENTRY_EMPTY;
}

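/* Find a free slot in the global fence table and assign it the next fence
 * id, wrapping the counter back to 1 when it reaches 0xFFFFFFF0.  Slots left
 * in the ENTRY_NEEDS_CLEANUP state are cleaned up opportunistically during
 * the scan.
 */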
int
find_empty_fence(void)
{
	int i;

	for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
		if (gem_buf_fence[i].fence_id == ENTRY_EMPTY) {
			gem_buf_fence[i].fence_id = fence_id++;
			gem_buf_fence[i].ts_valid = 0;
			INIT_LIST_HEAD(&(gem_buf_fence[i].buffers_in_fence));
			if (fence_id == 0xFFFFFFF0)
				fence_id = 1;
			return i;
		} else {

			/* Look for entries to be cleaned up */
			if (gem_buf_fence[i].fence_id == ENTRY_NEEDS_CLEANUP)
				cleanup_fence(&gem_buf_fence[i], 0);
		}
	}

	return ENTRY_EMPTY;
}

int
find_fence(int index)
{
	int i;

	for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
		if (gem_buf_fence[i].fence_id == index)
			return i;
	}

	return ENTRY_EMPTY;
}

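/* Called once the timestamp a fence was armed with has expired: wake the
 * next waiter (if any) on every buffer in the fence and mark the fence as
 * needing cleanup so find_empty_fence() can reclaim it.
 */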
void
wakeup_fence_entries(struct drm_kgsl_gem_object_fence *fence)
{
	struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
	struct drm_kgsl_gem_object_wait_list_entry *lock_next;
	struct drm_kgsl_gem_object *unlock_obj;
	struct drm_gem_object *obj;

	/* TS has expired when we get here */
	fence->ts_valid = 0;
	fence->timestamp = -1;
	fence->ts_device = -1;

	list_for_each_entry(this_fence_entry, &fence->buffers_in_fence, list) {
		obj = this_fence_entry->gem_obj;
		unlock_obj = obj->driver_private;

		if (!list_empty(&unlock_obj->wait_list)) {
			lock_next =
				(struct drm_kgsl_gem_object_wait_list_entry *)
				unlock_obj->wait_list.prev;

			/* Unblock the pid */
			lock_next->pid = 0;

			/* Delete it from the list */
			list_del((struct list_head *)&lock_next->list);

			unlock_obj->lockpid = 0;
			wake_up_interruptible(&lock_next->process_wait_q);

		} else {
			/* List is empty so set pid to 0 */
			unlock_obj->lockpid = 0;
		}
	}
	fence->fence_id = ENTRY_NEEDS_CLEANUP;  /* Mark it as needing cleanup */
}

int
kgsl_gem_lock_handle_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	/* The purpose of this function is to lock a given set of handles. */
	/* The driver will maintain a list of locked handles. */
	/* If a request comes in for a handle that's locked the thread will */
	/* block until it's no longer in use. */

	struct drm_kgsl_gem_lock_handles *args = data;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
	struct drm_kgsl_gem_object_fence *fence;
	struct drm_kgsl_gem_object_wait_list_entry *lock_item;
	int i, j;
	int result = 0;
	uint32_t *lock_list;
	uint32_t *work_list = NULL;
	int32_t fence_index;

	/* copy in the data from user space */
	lock_list = kzalloc(sizeof(uint32_t) * args->num_handles, GFP_KERNEL);
	if (!lock_list) {
		DRM_ERROR("Unable to allocate memory for lock list\n");
		result = -ENOMEM;
		goto error;
	}

	if (copy_from_user(lock_list, args->handle_list,
			   sizeof(uint32_t) * args->num_handles)) {
		DRM_ERROR("Unable to copy the lock list from the user\n");
		result = -EFAULT;
		goto free_handle_list;
	}

	work_list = lock_list;
	mutex_lock(&dev->struct_mutex);

	/* build the fence for this group of handles */
	fence_index = find_empty_fence();
	if (fence_index == ENTRY_EMPTY) {
		DRM_ERROR("Unable to find an empty fence\n");
		args->lock_id = 0xDEADBEEF;
		result = -EFAULT;
		goto out_unlock;
	}

	fence = &gem_buf_fence[fence_index];
	gem_buf_fence[fence_index].num_buffers = args->num_handles;
	args->lock_id = gem_buf_fence[fence_index].fence_id;

	for (j = args->num_handles; j > 0; j--, lock_list++) {
		obj = drm_gem_object_lookup(dev, file_priv, *lock_list);

		if (obj == NULL) {
			DRM_ERROR("Invalid GEM handle %x\n", *lock_list);
			result = -EBADF;
			goto out_unlock;
		}

		priv = obj->driver_private;
		this_fence_entry = NULL;

		/* get a fence entry to hook into the fence */
		for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
			if (!priv->fence_entries[i].in_use) {
				this_fence_entry = &priv->fence_entries[i];
				this_fence_entry->in_use = 1;
				break;
			}
		}

		if (this_fence_entry == NULL) {
			fence->num_buffers = 0;
			fence->fence_id = ENTRY_EMPTY;
			args->lock_id = 0xDEADBEAD;
			result = -EFAULT;
			drm_gem_object_unreference(obj);
			goto out_unlock;
		}

		/* We're trying to lock - add to a fence */
		list_add((struct list_head *)this_fence_entry,
			 &gem_buf_fence[fence_index].buffers_in_fence);
		if (priv->lockpid) {

			if (priv->lockpid == args->pid) {
				/* now that things are running async this */
				/* happens when an op isn't done */
				/* so it's already locked by the calling pid */
				continue;
			}

			/* if a pid already had it locked */
			/* create and add to wait list */
			for (i = 0; i < DRM_KGSL_HANDLE_WAIT_ENTRIES; i++) {
				if (priv->wait_entries[i].in_use == 0) {
					/* this one is empty */
					lock_item = &priv->wait_entries[i];
					lock_item->in_use = 1;
					lock_item->pid = args->pid;
					INIT_LIST_HEAD((struct list_head *)
						&priv->wait_entries[i]);
					break;
				}
			}

			if (i == DRM_KGSL_HANDLE_WAIT_ENTRIES) {
				result = -EFAULT;
				drm_gem_object_unreference(obj);
				goto out_unlock;
			}

			list_add_tail((struct list_head *)&lock_item->list,
				      &priv->wait_list);
			mutex_unlock(&dev->struct_mutex);
			/* here we need to block */
			wait_event_interruptible_timeout(
				priv->wait_entries[i].process_wait_q,
				(priv->lockpid == 0),
				msecs_to_jiffies(64));
			mutex_lock(&dev->struct_mutex);
			lock_item->in_use = 0;
		}

		/* Getting here means no one currently holds the lock */
		priv->lockpid = args->pid;

		args->lock_id = gem_buf_fence[fence_index].fence_id;
	}
	fence->lockpid = args->pid;

out_unlock:
	mutex_unlock(&dev->struct_mutex);

free_handle_list:
	kfree(work_list);

error:
	return result;
}

int
kgsl_gem_unlock_handle_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_kgsl_gem_unlock_handles *args = data;
	int result = 0;
	int32_t fence_index;

	mutex_lock(&dev->struct_mutex);
	fence_index = find_fence(args->lock_id);
	if (fence_index == ENTRY_EMPTY) {
		DRM_ERROR("Invalid lock ID: %x\n", args->lock_id);
		result = -EFAULT;
		goto out_unlock;
	}

	cleanup_fence(&gem_buf_fence[fence_index], 1);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return result;
}

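/* Deferred unlock: if the supplied device timestamp has already been
 * reached, the fence is cleaned up immediately; otherwise the fence is
 * armed (ts_valid) so its buffers are released once the timestamp expires.
 */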
int
kgsl_gem_unlock_on_ts_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_kgsl_gem_unlock_on_ts *args = data;
	int result = 0;
	int ts_done = 0;
	int32_t fence_index, ts_device;
	struct drm_kgsl_gem_object_fence *fence;
	struct kgsl_device *device;

	if (args->type == DRM_KGSL_GEM_TS_3D)
		ts_device = KGSL_DEVICE_3D0;
	else if (args->type == DRM_KGSL_GEM_TS_2D)
		ts_device = KGSL_DEVICE_2D0;
	else {
		result = -EINVAL;
		goto error;
	}

	device = kgsl_get_device(ts_device);
	ts_done = kgsl_check_timestamp(device, NULL, args->timestamp);

	mutex_lock(&dev->struct_mutex);

	fence_index = find_fence(args->lock_id);
	if (fence_index == ENTRY_EMPTY) {
		DRM_ERROR("Invalid lock ID: %x\n", args->lock_id);
		result = -EFAULT;
		goto out_unlock;
	}

	fence = &gem_buf_fence[fence_index];
	fence->ts_device = ts_device;

	if (!ts_done)
		fence->ts_valid = 1;
	else
		cleanup_fence(fence, 1);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

error:
	return result;
}

struct drm_ioctl_desc kgsl_drm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(KGSL_GEM_CREATE, kgsl_gem_create_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_PREP, kgsl_gem_prep_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_SETMEMTYPE, kgsl_gem_setmemtype_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_GETMEMTYPE, kgsl_gem_getmemtype_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_BIND_GPU, kgsl_gem_bind_gpu_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_UNBIND_GPU, kgsl_gem_unbind_gpu_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_ALLOC, kgsl_gem_alloc_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_MMAP, kgsl_gem_mmap_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_GET_BUFINFO, kgsl_gem_get_bufinfo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_SET_BUFCOUNT,
			  kgsl_gem_set_bufcount_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_SET_ACTIVE, kgsl_gem_set_active_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_LOCK_HANDLE,
			  kgsl_gem_lock_handle_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_UNLOCK_HANDLE,
			  kgsl_gem_unlock_handle_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_UNLOCK_ON_TS,
			  kgsl_gem_unlock_on_ts_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_CREATE_FD, kgsl_gem_create_fd_ioctl,
			  DRM_MASTER),
};

static struct drm_driver driver = {
	.driver_features = DRIVER_GEM,
	.load = kgsl_drm_load,
	.unload = kgsl_drm_unload,
	.preclose = kgsl_drm_preclose,
	.suspend = kgsl_drm_suspend,
	.resume = kgsl_drm_resume,
	.reclaim_buffers = drm_core_reclaim_buffers,
	.gem_init_object = kgsl_gem_init_object,
	.gem_free_object = kgsl_gem_free_object,
	.ioctls = kgsl_drm_ioctls,

	.fops = {
		.owner = THIS_MODULE,
		.open = drm_open,
		.release = drm_release,
		.unlocked_ioctl = drm_ioctl,
		.mmap = msm_drm_gem_mmap,
		.poll = drm_poll,
		.fasync = drm_fasync,
	},

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

int kgsl_drm_init(struct platform_device *dev)
{
	int i;

	/* Only initialize once */
	if (kgsl_drm_inited == DRM_KGSL_INITED)
		return 0;

	kgsl_drm_inited = DRM_KGSL_INITED;

	driver.num_ioctls = DRM_ARRAY_SIZE(kgsl_drm_ioctls);

	INIT_LIST_HEAD(&kgsl_mem_list);

	for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
		gem_buf_fence[i].num_buffers = 0;
		gem_buf_fence[i].ts_valid = 0;
		gem_buf_fence[i].fence_id = ENTRY_EMPTY;
	}

	return drm_platform_init(&driver, dev);
}

void kgsl_drm_exit(void)
{
	kgsl_drm_inited = DRM_KGSL_NOT_INITED;
	drm_platform_exit(&driver, driver.kdriver.platform_device);
}