/* Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/* Implements an interface between KGSL and the DRM subsystem. For now this
 * is pretty simple, but it will take on more of the workload as time goes
 * on
 */
#include "drmP.h"
#include "drm.h"
#include <linux/android_pmem.h>

#include "kgsl.h"
#include "kgsl_device.h"
#include "kgsl_drm.h"
#include "kgsl_mmu.h"
#include "kgsl_sharedmem.h"

#define DRIVER_AUTHOR		"Qualcomm"
#define DRIVER_NAME		"kgsl"
#define DRIVER_DESC		"KGSL DRM"
#define DRIVER_DATE		"20100127"

#define DRIVER_MAJOR		2
#define DRIVER_MINOR		1
#define DRIVER_PATCHLEVEL	1

#define DRM_KGSL_GEM_FLAG_MAPPED (1 << 0)

#define ENTRY_EMPTY -1
#define ENTRY_NEEDS_CLEANUP -2

#define DRM_KGSL_NOT_INITED -1
#define DRM_KGSL_INITED 1

#define DRM_KGSL_NUM_FENCE_ENTRIES (DRM_KGSL_HANDLE_WAIT_ENTRIES << 2)
#define DRM_KGSL_HANDLE_WAIT_ENTRIES 5

/* Returns true if the memory type is in PMEM */

#ifdef CONFIG_KERNEL_PMEM_SMI_REGION
#define TYPE_IS_PMEM(_t) \
  (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_EBI) || \
   ((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_SMI) || \
   ((_t) & DRM_KGSL_GEM_TYPE_PMEM))
#else
#define TYPE_IS_PMEM(_t) \
  (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_EBI) || \
   ((_t) & (DRM_KGSL_GEM_TYPE_PMEM | DRM_KGSL_GEM_PMEM_EBI)))
#endif

/* Returns true if the memory type is regular */

#define TYPE_IS_MEM(_t) \
  (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_KMEM) || \
   ((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE) || \
   ((_t) & DRM_KGSL_GEM_TYPE_MEM))

#define TYPE_IS_FD(_t) ((_t) & DRM_KGSL_GEM_TYPE_FD_MASK)

/* Returns true if KMEM region is uncached */

#define IS_MEM_UNCACHED(_t) \
  ((_t == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE) || \
   (_t == DRM_KGSL_GEM_TYPE_KMEM) || \
   (TYPE_IS_MEM(_t) && (_t & DRM_KGSL_GEM_CACHE_WCOMBINE)))

struct drm_kgsl_gem_object_wait_list_entry {
	struct list_head list;
	int pid;
	int in_use;
	wait_queue_head_t process_wait_q;
};

struct drm_kgsl_gem_object_fence {
	int32_t fence_id;
	unsigned int num_buffers;
	int ts_valid;
	unsigned int timestamp;
	int ts_device;
	int lockpid;
	struct list_head buffers_in_fence;
};

struct drm_kgsl_gem_object_fence_list_entry {
	struct list_head list;
	int in_use;
	struct drm_gem_object *gem_obj;
};

static int32_t fence_id = 0x1;

static struct drm_kgsl_gem_object_fence
	gem_buf_fence[DRM_KGSL_NUM_FENCE_ENTRIES];

struct drm_kgsl_gem_object {
	struct drm_gem_object *obj;
	uint32_t type;
	struct kgsl_memdesc memdesc;
	struct kgsl_pagetable *pagetable;
	uint64_t mmap_offset;
	int bufcount;
	int flags;
	struct list_head list;
	int active;

	struct {
		uint32_t offset;
		uint32_t gpuaddr;
	} bufs[DRM_KGSL_GEM_MAX_BUFFERS];

	int bound;
	int lockpid;
	/* Put these here to avoid allocing all the time */
	struct drm_kgsl_gem_object_wait_list_entry
		wait_entries[DRM_KGSL_HANDLE_WAIT_ENTRIES];
	/* Each object can only appear in a single fence */
	struct drm_kgsl_gem_object_fence_list_entry
		fence_entries[DRM_KGSL_NUM_FENCE_ENTRIES];

	struct list_head wait_list;
};

static int kgsl_drm_inited = DRM_KGSL_NOT_INITED;

/* This is a global list of all the memory currently mapped in the MMU */
static struct list_head kgsl_mem_list;

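/* Translate a DRM cache operation into the matching KGSL cache maintenance
 * call: clean writeback mappings before handing a buffer to the device,
 * invalidate cacheable mappings when taking it back */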
static void kgsl_gem_mem_flush(struct kgsl_memdesc *memdesc, int type, int op)
{
	int cacheop = 0;

	switch (op) {
	case DRM_KGSL_GEM_CACHE_OP_TO_DEV:
		if (type & (DRM_KGSL_GEM_CACHE_WBACK |
			    DRM_KGSL_GEM_CACHE_WBACKWA))
			cacheop = KGSL_CACHE_OP_CLEAN;

		break;

	case DRM_KGSL_GEM_CACHE_OP_FROM_DEV:
		if (type & (DRM_KGSL_GEM_CACHE_WBACK |
			    DRM_KGSL_GEM_CACHE_WBACKWA |
			    DRM_KGSL_GEM_CACHE_WTHROUGH))
			cacheop = KGSL_CACHE_OP_INV;
	}

	kgsl_cache_range_op(memdesc, cacheop);
}

/* TODO:
 * Add vsync wait */

static int kgsl_drm_load(struct drm_device *dev, unsigned long flags)
{
	return 0;
}

static int kgsl_drm_unload(struct drm_device *dev)
{
	return 0;
}

struct kgsl_drm_device_priv {
	struct kgsl_device *device[KGSL_DEVICE_MAX];
	struct kgsl_device_private *devpriv[KGSL_DEVICE_MAX];
};

void kgsl_drm_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
}

static int kgsl_drm_suspend(struct drm_device *dev, pm_message_t state)
{
	return 0;
}

static int kgsl_drm_resume(struct drm_device *dev)
{
	return 0;
}

static void
kgsl_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_kgsl_gem_object *priv = obj->driver_private;
	struct drm_map_list *list;

	list = &obj->map_list;
	drm_ht_remove_item(&mm->offset_hash, &list->hash);
	if (list->file_offset_node) {
		drm_mm_put_block(list->file_offset_node);
		list->file_offset_node = NULL;
	}

	kfree(list->map);
	list->map = NULL;

	priv->mmap_offset = 0;
}

static int
kgsl_gem_memory_allocated(struct drm_gem_object *obj)
{
	struct drm_kgsl_gem_object *priv = obj->driver_private;
	return priv->memdesc.size ? 1 : 0;
}

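/* Allocate the backing memory for an object: contiguous EBI/PMEM memory for
 * PMEM types, page-allocated kernel memory otherwise. All of the object's
 * sub-buffers come from one allocation sized obj->size * bufcount, and each
 * buffer's GPU address is precomputed from its offset. */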
static int
kgsl_gem_alloc_memory(struct drm_gem_object *obj)
{
	struct drm_kgsl_gem_object *priv = obj->driver_private;
	int index;
	int result = 0;

	/* Return if the memory is already allocated */

	if (kgsl_gem_memory_allocated(obj) || TYPE_IS_FD(priv->type))
		return 0;

	if (priv->pagetable == NULL) {
		priv->pagetable = kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);

		if (priv->pagetable == NULL) {
			DRM_ERROR("Unable to get the GPU MMU pagetable\n");
			return -EINVAL;
		}
	}

	/* Set the flags for the memdesc (probably 0, unless it is cached) */
	priv->memdesc.priv = 0;

	if (TYPE_IS_PMEM(priv->type)) {
		if (priv->type == DRM_KGSL_GEM_TYPE_EBI ||
		    priv->type & DRM_KGSL_GEM_PMEM_EBI) {
			result = kgsl_sharedmem_ebimem_user(
					&priv->memdesc,
					priv->pagetable,
					obj->size * priv->bufcount);
			if (result) {
				DRM_ERROR(
				"Unable to allocate PMEM memory\n");
				return result;
			}
		} else
			return -EINVAL;

	} else if (TYPE_IS_MEM(priv->type)) {

		if (priv->type == DRM_KGSL_GEM_TYPE_KMEM ||
		    priv->type & DRM_KGSL_GEM_CACHE_MASK)
			list_add(&priv->list, &kgsl_mem_list);

		result = kgsl_sharedmem_page_alloc_user(&priv->memdesc,
					priv->pagetable,
					obj->size * priv->bufcount);

		if (result != 0) {
			DRM_ERROR(
			"Unable to allocate Vmalloc user memory\n");
			return result;
		}
	} else
		return -EINVAL;

	for (index = 0; index < priv->bufcount; index++) {
		priv->bufs[index].offset = index * obj->size;
		priv->bufs[index].gpuaddr =
			priv->memdesc.gpuaddr +
			priv->bufs[index].offset;
	}
	priv->flags |= DRM_KGSL_GEM_FLAG_MAPPED;

	return 0;
}

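/* Flush caches for the buffer, free the backing memory and the pagetable
 * reference, and drop the object from the global KMEM list if it was added */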
static void
kgsl_gem_free_memory(struct drm_gem_object *obj)
{
	struct drm_kgsl_gem_object *priv = obj->driver_private;

	if (!kgsl_gem_memory_allocated(obj) || TYPE_IS_FD(priv->type))
		return;

	kgsl_gem_mem_flush(&priv->memdesc, priv->type,
			   DRM_KGSL_GEM_CACHE_OP_FROM_DEV);

	kgsl_sharedmem_free(&priv->memdesc);

	kgsl_mmu_putpagetable(priv->pagetable);
	priv->pagetable = NULL;

	if ((priv->type == DRM_KGSL_GEM_TYPE_KMEM) ||
	    (priv->type & DRM_KGSL_GEM_CACHE_MASK))
		list_del(&priv->list);

	priv->flags &= ~DRM_KGSL_GEM_FLAG_MAPPED;

}

int
kgsl_gem_init_object(struct drm_gem_object *obj)
{
	struct drm_kgsl_gem_object *priv;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv == NULL) {
		DRM_ERROR("Unable to create GEM object\n");
		return -ENOMEM;
	}

	obj->driver_private = priv;
	priv->obj = obj;

	return 0;
}

void
kgsl_gem_free_object(struct drm_gem_object *obj)
{
	kgsl_gem_free_memory(obj);
	kgsl_gem_free_mmap_offset(obj);
	drm_gem_object_release(obj);
	kfree(obj->driver_private);
}

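/* Reserve a fake mmap offset for the object with the DRM offset manager so
 * userspace can map it through the DRM device node */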
static int
kgsl_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_kgsl_gem_object *priv = obj->driver_private;
	struct drm_map_list *list;
	int msize;

	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (list->map == NULL) {
		DRM_ERROR("Unable to allocate drm_map_list\n");
		return -ENOMEM;
	}

	msize = obj->size * priv->bufcount;

	list->map->type = _DRM_GEM;
	list->map->size = msize;
	list->map->handle = obj;

	/* Allocate a mmap offset */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    msize / PAGE_SIZE,
						    0, 0);

	if (!list->file_offset_node) {
		DRM_ERROR("Failed to allocate offset for %d\n", obj->name);
		kfree(list->map);
		return -ENOMEM;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  msize / PAGE_SIZE, 0);

	if (!list->file_offset_node) {
		DRM_ERROR("Unable to create the file_offset_node\n");
		kfree(list->map);
		return -ENOMEM;
	}

	list->hash.key = list->file_offset_node->start;
	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
		DRM_ERROR("Failed to add to map hash\n");
		drm_mm_put_block(list->file_offset_node);
		kfree(list->map);
		return -ENOMEM;
	}

	priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;

	return 0;
}

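/* Resolve a GEM handle on behalf of another kernel client (the MDP display
 * driver) and return the physical address and length of its active buffer.
 * Only PMEM (physically contiguous) objects can be resolved this way. */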
int
kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start,
		  unsigned long *len)
{
	struct file *filp;
	struct drm_device *dev;
	struct drm_file *file_priv;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	int ret = 0;

	filp = fget(drm_fd);
	if (unlikely(filp == NULL)) {
		DRM_ERROR("Unable to get the DRM file descriptor\n");
		return -EINVAL;
	}
	file_priv = filp->private_data;
	if (unlikely(file_priv == NULL)) {
		DRM_ERROR("Unable to get the file private data\n");
		fput(filp);
		return -EINVAL;
	}
	dev = file_priv->minor->dev;
	if (unlikely(dev == NULL)) {
		DRM_ERROR("Unable to get the minor device\n");
		fput(filp);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (unlikely(obj == NULL)) {
		DRM_ERROR("Invalid GEM handle %x\n", handle);
		fput(filp);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	/* We can only use the MDP for PMEM regions */

	if (TYPE_IS_PMEM(priv->type)) {
		*start = priv->memdesc.physaddr +
			priv->bufs[priv->active].offset;

		*len = priv->memdesc.size;

		kgsl_gem_mem_flush(&priv->memdesc,
				   priv->type, DRM_KGSL_GEM_CACHE_OP_TO_DEV);
	} else {
		*start = 0;
		*len = 0;
		ret = -EINVAL;
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	fput(filp);
	return ret;
}

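/* Common initialization for a new GEM object: pick the default memory type,
 * create the userspace handle and prime the per-object wait and fence
 * entry pools */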
static int
kgsl_gem_init_obj(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct drm_gem_object *obj,
		  int *handle)
{
	struct drm_kgsl_gem_object *priv;
	int ret, i;

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	memset(&priv->memdesc, 0, sizeof(priv->memdesc));
	priv->bufcount = 1;
	priv->active = 0;
	priv->bound = 0;

	/* To preserve backwards compatibility, the default memory source
	   is EBI */

	priv->type = DRM_KGSL_GEM_TYPE_PMEM | DRM_KGSL_GEM_PMEM_EBI;

	ret = drm_gem_handle_create(file_priv, obj, handle);

	drm_gem_object_unreference(obj);
	INIT_LIST_HEAD(&priv->wait_list);

	for (i = 0; i < DRM_KGSL_HANDLE_WAIT_ENTRIES; i++) {
		INIT_LIST_HEAD((struct list_head *) &priv->wait_entries[i]);
		priv->wait_entries[i].pid = 0;
		init_waitqueue_head(&priv->wait_entries[i].process_wait_q);
	}

	for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
		INIT_LIST_HEAD((struct list_head *) &priv->fence_entries[i]);
		priv->fence_entries[i].in_use = 0;
		priv->fence_entries[i].gem_obj = obj;
	}

	mutex_unlock(&dev->struct_mutex);
	return ret;
}

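/* DRM_KGSL_GEM_CREATE: create a GEM object of the requested (page aligned)
 * size and return a handle. Backing memory is not allocated until the
 * ALLOC (or deprecated PREP) ioctl is called. */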
int
kgsl_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_kgsl_gem_create *create = data;
	struct drm_gem_object *obj;
	int ret, handle;

	/* Page align the size so we can allocate multiple buffers */
	create->size = ALIGN(create->size, 4096);

	obj = drm_gem_object_alloc(dev, create->size);

	if (obj == NULL) {
		DRM_ERROR("Unable to allocate the GEM object\n");
		return -ENOMEM;
	}

	ret = kgsl_gem_init_obj(dev, file_priv, obj, &handle);
	if (ret)
		return ret;

	create->handle = handle;
	return 0;
}

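/* DRM_KGSL_GEM_CREATE_FD: wrap an existing framebuffer device fd in a GEM
 * object so its memory can be referenced by handle. Only framebuffer
 * nodes are accepted. */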
int
kgsl_gem_create_fd_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_kgsl_gem_create_fd *args = data;
	struct file *file;
	dev_t rdev;
	struct fb_info *info;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	int ret, put_needed, handle;

	file = fget_light(args->fd, &put_needed);

	if (file == NULL) {
		DRM_ERROR("Unable to get the file object\n");
		return -EBADF;
	}

	rdev = file->f_dentry->d_inode->i_rdev;

	/* Only framebuffer objects are supported ATM */

	if (MAJOR(rdev) != FB_MAJOR) {
		DRM_ERROR("File descriptor is not a framebuffer\n");
		ret = -EBADF;
		goto error_fput;
	}

	info = registered_fb[MINOR(rdev)];

	if (info == NULL) {
		DRM_ERROR("Framebuffer minor %d is not registered\n",
			  MINOR(rdev));
		ret = -EBADF;
		goto error_fput;
	}

	obj = drm_gem_object_alloc(dev, info->fix.smem_len);

	if (obj == NULL) {
		DRM_ERROR("Unable to allocate GEM object\n");
		ret = -ENOMEM;
		goto error_fput;
	}

	ret = kgsl_gem_init_obj(dev, file_priv, obj, &handle);

	if (ret)
		goto error_fput;

	mutex_lock(&dev->struct_mutex);

	priv = obj->driver_private;
	priv->memdesc.physaddr = info->fix.smem_start;
	priv->type = DRM_KGSL_GEM_TYPE_FD_FBMEM;

	mutex_unlock(&dev->struct_mutex);
	args->handle = handle;

error_fput:
	fput_light(file, put_needed);

	return ret;
}

int
kgsl_gem_setmemtype_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_kgsl_gem_memtype *args = data;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);

	if (obj == NULL) {
		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	if (TYPE_IS_FD(priv->type))
		ret = -EINVAL;
	else {
		if (TYPE_IS_PMEM(args->type) || TYPE_IS_MEM(args->type))
			priv->type = args->type;
		else
			ret = -EINVAL;
	}

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
kgsl_gem_getmemtype_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_kgsl_gem_memtype *args = data;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);

	if (obj == NULL) {
		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	args->type = priv->type;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
kgsl_gem_unbind_gpu_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	return 0;
}

int
kgsl_gem_bind_gpu_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return 0;
}

/* Allocate the memory and prepare it for CPU mapping */

int
kgsl_gem_alloc_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_kgsl_gem_alloc *args = data;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);

	if (obj == NULL) {
		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	ret = kgsl_gem_alloc_memory(obj);

	if (ret) {
		DRM_ERROR("Unable to allocate object memory\n");
	} else if (!priv->mmap_offset) {
		ret = kgsl_gem_create_mmap_offset(obj);
		if (ret)
			DRM_ERROR("Unable to create a mmap offset\n");
	}

	args->offset = priv->mmap_offset;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
kgsl_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_kgsl_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);

	if (obj == NULL) {
		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
		return -EBADF;
	}

	down_write(&current->mm->mmap_sem);

	addr = do_mmap(obj->filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);

	up_write(&current->mm->mmap_sem);

	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR((void *) addr))
		return addr;

	args->hostptr = (uint32_t) addr;
	return 0;
}

/* This function is deprecated */

int
kgsl_gem_prep_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_kgsl_gem_prep *args = data;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);

	if (obj == NULL) {
		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	ret = kgsl_gem_alloc_memory(obj);
	if (ret) {
		DRM_ERROR("Unable to allocate object memory\n");
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	if (priv->mmap_offset == 0) {
		ret = kgsl_gem_create_mmap_offset(obj);
		if (ret) {
			drm_gem_object_unreference(obj);
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}
	}

	args->offset = priv->mmap_offset;
	args->phys = priv->memdesc.physaddr;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
kgsl_gem_get_bufinfo_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_kgsl_gem_bufinfo *args = data;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	int ret = -EINVAL;
	int index;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);

	if (obj == NULL) {
		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	if (!kgsl_gem_memory_allocated(obj)) {
		DRM_ERROR("Memory not allocated for this object\n");
		goto out;
	}

	for (index = 0; index < priv->bufcount; index++) {
		args->offset[index] = priv->bufs[index].offset;
		args->gpuaddr[index] = priv->bufs[index].gpuaddr;
	}

	args->count = priv->bufcount;
	args->active = priv->active;

	ret = 0;

out:
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
kgsl_gem_set_bufcount_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_kgsl_gem_bufcount *args = data;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	int ret = -EINVAL;

	if (args->bufcount < 1 || args->bufcount > DRM_KGSL_GEM_MAX_BUFFERS)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);

	if (obj == NULL) {
		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	/* It is too much math to worry about what happens if we are already
	   allocated, so just bail if we are */

	if (kgsl_gem_memory_allocated(obj)) {
		DRM_ERROR("Memory already allocated - cannot change "
			  "number of buffers\n");
		goto out;
	}

	priv->bufcount = args->bufcount;
	ret = 0;

out:
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int
kgsl_gem_set_active_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_kgsl_gem_active *args = data;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	int ret = -EINVAL;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);

	if (obj == NULL) {
		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
		return -EBADF;
	}

	mutex_lock(&dev->struct_mutex);
	priv = obj->driver_private;

	if (args->active < 0 || args->active >= priv->bufcount) {
		DRM_ERROR("Invalid active buffer %d\n", args->active);
		goto out;
	}

	priv->active = args->active;
	ret = 0;

out:
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

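/* Fault handler for page-allocated (KMEM) mappings: look up the page that
 * backs the faulting offset in the memdesc scatterlist and return it to
 * the VM */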
int kgsl_gem_kmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct drm_kgsl_gem_object *priv;
	unsigned long offset;
	struct page *page;
	int i;

	mutex_lock(&dev->struct_mutex);

	priv = obj->driver_private;

	offset = (unsigned long) vmf->virtual_address - vma->vm_start;
	i = offset >> PAGE_SHIFT;
	page = sg_page(&(priv->memdesc.sg[i]));

	if (!page) {
		mutex_unlock(&dev->struct_mutex);
		return VM_FAULT_SIGBUS;
	}

	get_page(page);
	vmf->page = page;

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

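/* Fault handler for physically contiguous (PMEM/FB) mappings: insert the
 * PFN for the faulting offset directly, since these VMAs are VM_PFNMAP
 * and have no struct page to return */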
int kgsl_gem_phys_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct drm_kgsl_gem_object *priv;
	unsigned long offset, pfn;
	int ret = 0;

	offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	mutex_lock(&dev->struct_mutex);

	priv = obj->driver_private;

	pfn = (priv->memdesc.physaddr >> PAGE_SHIFT) + offset;
	ret = vm_insert_pfn(vma,
			    (unsigned long) vmf->virtual_address, pfn);
	mutex_unlock(&dev->struct_mutex);

	switch (ret) {
	case -ENOMEM:
	case -EAGAIN:
		return VM_FAULT_OOM;
	case -EFAULT:
		return VM_FAULT_SIGBUS;
	default:
		return VM_FAULT_NOPAGE;
	}
}

static struct vm_operations_struct kgsl_gem_kmem_vm_ops = {
	.fault = kgsl_gem_kmem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static struct vm_operations_struct kgsl_gem_phys_vm_ops = {
	.fault = kgsl_gem_phys_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

/* This is a clone of the standard drm_gem_mmap function modified to allow
   us to properly map KMEM regions as well as the PMEM regions */

int msm_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	struct drm_kgsl_gem_object *gpriv;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		ret = -EPERM;
		goto out_unlock;
	}

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start) {
		ret = -EINVAL;
		goto out_unlock;
	}

	obj = map->handle;

	gpriv = obj->driver_private;

	/* VM_PFNMAP is only for memory that doesn't use struct page
	 * in other words, not "normal" memory. If you try to use it
	 * with "normal" memory then the mappings don't get flushed. */

	if (TYPE_IS_MEM(gpriv->type)) {
		vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
		vma->vm_ops = &kgsl_gem_kmem_vm_ops;
	} else {
		vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP |
			VM_DONTEXPAND;
		vma->vm_ops = &kgsl_gem_phys_vm_ops;
	}

	vma->vm_private_data = map->handle;


	/* Take care of requested caching policy */
	if (gpriv->type == DRM_KGSL_GEM_TYPE_KMEM ||
	    gpriv->type & DRM_KGSL_GEM_CACHE_MASK) {
		if (gpriv->type & DRM_KGSL_GEM_CACHE_WBACKWA)
			vma->vm_page_prot =
				pgprot_writebackwacache(vma->vm_page_prot);
		else if (gpriv->type & DRM_KGSL_GEM_CACHE_WBACK)
			vma->vm_page_prot =
				pgprot_writebackcache(vma->vm_page_prot);
		else if (gpriv->type & DRM_KGSL_GEM_CACHE_WTHROUGH)
			vma->vm_page_prot =
				pgprot_writethroughcache(vma->vm_page_prot);
		else
			vma->vm_page_prot =
				pgprot_writecombine(vma->vm_page_prot);
	} else {
		if (gpriv->type == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE)
			vma->vm_page_prot =
				pgprot_noncached(vma->vm_page_prot);
		else
			/* default pmem is WC */
			vma->vm_page_prot =
				pgprot_writecombine(vma->vm_page_prot);
	}

	/* flush out existing KMEM cached mappings if new ones are
	 * of uncached type */
	if (IS_MEM_UNCACHED(gpriv->type))
		kgsl_cache_range_op(&gpriv->memdesc,
				    KGSL_CACHE_OP_FLUSH);

	/* Add the other memory types here */

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

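/* Release every buffer attached to a fence: drop the object references,
 * optionally pass each object's lock on to the next waiting pid, and mark
 * the fence entry empty again */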
void
cleanup_fence(struct drm_kgsl_gem_object_fence *fence, int check_waiting)
{
	int j;
	struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
	struct drm_kgsl_gem_object *unlock_obj;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object_wait_list_entry *lock_next;

	fence->ts_valid = 0;
	fence->timestamp = -1;
	fence->ts_device = -1;

	/* Walk the list of buffers in this fence and clean up the */
	/* references. Note that this can cause memory allocations */
	/* to be freed */
	for (j = fence->num_buffers; j > 0; j--) {
		this_fence_entry =
			(struct drm_kgsl_gem_object_fence_list_entry *)
			fence->buffers_in_fence.prev;

		this_fence_entry->in_use = 0;
		obj = this_fence_entry->gem_obj;
		unlock_obj = obj->driver_private;

		/* Delete it from the list */

		list_del(&this_fence_entry->list);

		/* we are unlocking - see if there are other pids waiting */
		if (check_waiting) {
			if (!list_empty(&unlock_obj->wait_list)) {
				lock_next =
				(struct drm_kgsl_gem_object_wait_list_entry *)
				unlock_obj->wait_list.prev;

				list_del((struct list_head *)&lock_next->list);

				unlock_obj->lockpid = 0;
				wake_up_interruptible(
					&lock_next->process_wait_q);
				lock_next->pid = 0;

			} else {
				/* List is empty so set pid to 0 */
				unlock_obj->lockpid = 0;
			}
		}

		drm_gem_object_unreference(obj);
	}
	/* here all the buffers in the fence are released */
	/* clear the fence entry */
	fence->fence_id = ENTRY_EMPTY;
}

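/* Find a free slot in the global fence table and assign it the next fence
 * id; slots that were marked for cleanup are recycled along the way */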
int
find_empty_fence(void)
{
	int i;

	for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
		if (gem_buf_fence[i].fence_id == ENTRY_EMPTY) {
			gem_buf_fence[i].fence_id = fence_id++;
			gem_buf_fence[i].ts_valid = 0;
			INIT_LIST_HEAD(&(gem_buf_fence[i].buffers_in_fence));
			if (fence_id == 0xFFFFFFF0)
				fence_id = 1;
			return i;
		} else {

			/* Look for entries to be cleaned up */
			if (gem_buf_fence[i].fence_id == ENTRY_NEEDS_CLEANUP)
				cleanup_fence(&gem_buf_fence[i], 0);
		}
	}

	return ENTRY_EMPTY;
}

int
find_fence(int index)
{
	int i;

	for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
		if (gem_buf_fence[i].fence_id == index)
			return i;
	}

	return ENTRY_EMPTY;
}

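/* Called once the timestamp for a fence has expired: wake the next waiter
 * (if any) on every buffer in the fence and mark the fence for cleanup */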
void
wakeup_fence_entries(struct drm_kgsl_gem_object_fence *fence)
{
	struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
	struct drm_kgsl_gem_object_wait_list_entry *lock_next;
	struct drm_kgsl_gem_object *unlock_obj;
	struct drm_gem_object *obj;

	/* TS has expired when we get here */
	fence->ts_valid = 0;
	fence->timestamp = -1;
	fence->ts_device = -1;

	list_for_each_entry(this_fence_entry, &fence->buffers_in_fence, list) {
		obj = this_fence_entry->gem_obj;
		unlock_obj = obj->driver_private;

		if (!list_empty(&unlock_obj->wait_list)) {
			lock_next =
				(struct drm_kgsl_gem_object_wait_list_entry *)
				unlock_obj->wait_list.prev;

			/* Unblock the pid */
			lock_next->pid = 0;

			/* Delete it from the list */
			list_del((struct list_head *)&lock_next->list);

			unlock_obj->lockpid = 0;
			wake_up_interruptible(&lock_next->process_wait_q);

		} else {
			/* List is empty so set pid to 0 */
			unlock_obj->lockpid = 0;
		}
	}
	fence->fence_id = ENTRY_NEEDS_CLEANUP; /* Mark it as needing cleanup */
}

int
kgsl_gem_lock_handle_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	/* The purpose of this function is to lock a given set of handles. */
	/* The driver will maintain a list of locked handles. */
	/* If a request comes in for a handle that's locked the thread will */
	/* block until it's no longer in use. */

	struct drm_kgsl_gem_lock_handles *args = data;
	struct drm_gem_object *obj;
	struct drm_kgsl_gem_object *priv;
	struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
	struct drm_kgsl_gem_object_fence *fence;
	struct drm_kgsl_gem_object_wait_list_entry *lock_item;
	int i, j;
	int result = 0;
	uint32_t *lock_list;
	uint32_t *work_list = NULL;
	int32_t fence_index;

	/* copy in the data from user space */
	lock_list = kzalloc(sizeof(uint32_t) * args->num_handles, GFP_KERNEL);
	if (!lock_list) {
		DRM_ERROR("Unable to allocate memory for lock list\n");
		result = -ENOMEM;
		goto error;
	}

	if (copy_from_user(lock_list, args->handle_list,
			   sizeof(uint32_t) * args->num_handles)) {
		DRM_ERROR("Unable to copy the lock list from the user\n");
		result = -EFAULT;
		goto free_handle_list;
	}


	work_list = lock_list;
	mutex_lock(&dev->struct_mutex);

	/* build the fence for this group of handles */
	fence_index = find_empty_fence();
	if (fence_index == ENTRY_EMPTY) {
		DRM_ERROR("Unable to find an empty fence\n");
		args->lock_id = 0xDEADBEEF;
		result = -EFAULT;
		goto out_unlock;
	}

	fence = &gem_buf_fence[fence_index];
	gem_buf_fence[fence_index].num_buffers = args->num_handles;
	args->lock_id = gem_buf_fence[fence_index].fence_id;

	for (j = args->num_handles; j > 0; j--, lock_list++) {
		obj = drm_gem_object_lookup(dev, file_priv, *lock_list);

		if (obj == NULL) {
			DRM_ERROR("Invalid GEM handle %x\n", *lock_list);
			result = -EBADF;
			goto out_unlock;
		}

		priv = obj->driver_private;
		this_fence_entry = NULL;

		/* get a fence entry to hook into the fence */
		for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
			if (!priv->fence_entries[i].in_use) {
				this_fence_entry = &priv->fence_entries[i];
				this_fence_entry->in_use = 1;
				break;
			}
		}

		if (this_fence_entry == NULL) {
			fence->num_buffers = 0;
			fence->fence_id = ENTRY_EMPTY;
			args->lock_id = 0xDEADBEAD;
			result = -EFAULT;
			drm_gem_object_unreference(obj);
			goto out_unlock;
		}

		/* We're trying to lock - add to a fence */
		list_add((struct list_head *)this_fence_entry,
			 &gem_buf_fence[fence_index].buffers_in_fence);
		if (priv->lockpid) {

			if (priv->lockpid == args->pid) {
				/* now that things are running async this */
				/* happens when an op isn't done */
				/* so it's already locked by the calling pid */
				continue;
			}


			/* if a pid already had it locked */
			/* create and add to wait list */
			for (i = 0; i < DRM_KGSL_HANDLE_WAIT_ENTRIES; i++) {
				if (priv->wait_entries[i].in_use == 0) {
					/* this one is empty */
					lock_item = &priv->wait_entries[i];
					lock_item->in_use = 1;
					lock_item->pid = args->pid;
					INIT_LIST_HEAD((struct list_head *)
						&priv->wait_entries[i]);
					break;
				}
			}

			if (i == DRM_KGSL_HANDLE_WAIT_ENTRIES) {

				result = -EFAULT;
				drm_gem_object_unreference(obj);
				goto out_unlock;
			}

			list_add_tail((struct list_head *)&lock_item->list,
				      &priv->wait_list);
			mutex_unlock(&dev->struct_mutex);
			/* here we need to block */
			wait_event_interruptible_timeout(
				priv->wait_entries[i].process_wait_q,
				(priv->lockpid == 0),
				msecs_to_jiffies(64));
			mutex_lock(&dev->struct_mutex);
			lock_item->in_use = 0;
		}

		/* Getting here means no one currently holds the lock */
		priv->lockpid = args->pid;

		args->lock_id = gem_buf_fence[fence_index].fence_id;
	}
	fence->lockpid = args->pid;

out_unlock:
	mutex_unlock(&dev->struct_mutex);

free_handle_list:
	kfree(work_list);

error:
	return result;
}

int
kgsl_gem_unlock_handle_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_kgsl_gem_unlock_handles *args = data;
	int result = 0;
	int32_t fence_index;

	mutex_lock(&dev->struct_mutex);
	fence_index = find_fence(args->lock_id);
	if (fence_index == ENTRY_EMPTY) {
		DRM_ERROR("Invalid lock ID: %x\n", args->lock_id);
		result = -EFAULT;
		goto out_unlock;
	}

	cleanup_fence(&gem_buf_fence[fence_index], 1);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return result;
}


int
kgsl_gem_unlock_on_ts_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_kgsl_gem_unlock_on_ts *args = data;
	int result = 0;
	int ts_done = 0;
	int32_t fence_index, ts_device;
	struct drm_kgsl_gem_object_fence *fence;
	struct kgsl_device *device;

	if (args->type == DRM_KGSL_GEM_TS_3D)
		ts_device = KGSL_DEVICE_3D0;
	else if (args->type == DRM_KGSL_GEM_TS_2D)
		ts_device = KGSL_DEVICE_2D0;
	else {
		result = -EINVAL;
		goto error;
	}

	device = kgsl_get_device(ts_device);
	ts_done = kgsl_check_timestamp(device, NULL, args->timestamp);

	mutex_lock(&dev->struct_mutex);

	fence_index = find_fence(args->lock_id);
	if (fence_index == ENTRY_EMPTY) {
		DRM_ERROR("Invalid lock ID: %x\n", args->lock_id);
		result = -EFAULT;
		goto out_unlock;
	}

	fence = &gem_buf_fence[fence_index];
	fence->ts_device = ts_device;

	if (!ts_done)
		fence->ts_valid = 1;
	else
		cleanup_fence(fence, 1);


out_unlock:
	mutex_unlock(&dev->struct_mutex);

error:
	return result;
}

struct drm_ioctl_desc kgsl_drm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(KGSL_GEM_CREATE, kgsl_gem_create_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_PREP, kgsl_gem_prep_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_SETMEMTYPE, kgsl_gem_setmemtype_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_GETMEMTYPE, kgsl_gem_getmemtype_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_BIND_GPU, kgsl_gem_bind_gpu_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_UNBIND_GPU, kgsl_gem_unbind_gpu_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_ALLOC, kgsl_gem_alloc_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_MMAP, kgsl_gem_mmap_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_GET_BUFINFO, kgsl_gem_get_bufinfo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_SET_BUFCOUNT,
			  kgsl_gem_set_bufcount_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_SET_ACTIVE, kgsl_gem_set_active_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_LOCK_HANDLE,
			  kgsl_gem_lock_handle_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_UNLOCK_HANDLE,
			  kgsl_gem_unlock_handle_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_UNLOCK_ON_TS,
			  kgsl_gem_unlock_on_ts_ioctl, 0),
	DRM_IOCTL_DEF_DRV(KGSL_GEM_CREATE_FD, kgsl_gem_create_fd_ioctl,
			  DRM_MASTER),
};

static struct drm_driver driver = {
	.driver_features = DRIVER_GEM,
	.load = kgsl_drm_load,
	.unload = kgsl_drm_unload,
	.preclose = kgsl_drm_preclose,
	.suspend = kgsl_drm_suspend,
	.resume = kgsl_drm_resume,
	.reclaim_buffers = drm_core_reclaim_buffers,
	.gem_init_object = kgsl_gem_init_object,
	.gem_free_object = kgsl_gem_free_object,
	.ioctls = kgsl_drm_ioctls,

	.fops = {
		.owner = THIS_MODULE,
		.open = drm_open,
		.release = drm_release,
		.unlocked_ioctl = drm_ioctl,
		.mmap = msm_drm_gem_mmap,
		.poll = drm_poll,
		.fasync = drm_fasync,
	},

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

int kgsl_drm_init(struct platform_device *dev)
{
	int i;

	/* Only initialize once */
	if (kgsl_drm_inited == DRM_KGSL_INITED)
		return 0;

	kgsl_drm_inited = DRM_KGSL_INITED;

	driver.num_ioctls = DRM_ARRAY_SIZE(kgsl_drm_ioctls);

	INIT_LIST_HEAD(&kgsl_mem_list);

	for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
		gem_buf_fence[i].num_buffers = 0;
		gem_buf_fence[i].ts_valid = 0;
		gem_buf_fence[i].fence_id = ENTRY_EMPTY;
	}

	return drm_platform_init(&driver, dev);
}

void kgsl_drm_exit(void)
{
	kgsl_drm_inited = DRM_KGSL_NOT_INITED;
	drm_platform_exit(&driver, driver.kdriver.platform_device);
}