/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/vmalloc.h>
#include <linux/memory_alloc.h>
#include <asm/cacheflush.h>

#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_cffdump.h"
#include "kgsl_device.h"

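/*
 * Look up the kgsl_process_private whose pid matches the name of the
 * given sysfs kobject. The caller is expected to hold
 * kgsl_driver.process_mutex, as process_show() below does.
 */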
static struct kgsl_process_private *
_get_priv_from_kobj(struct kobject *kobj)
{
	struct kgsl_process_private *private;
	unsigned long name;

	if (!kobj)
		return NULL;

	if (sscanf(kobj->name, "%ld", &name) != 1)
		return NULL;

	list_for_each_entry(private, &kgsl_driver.process_list, list) {
		if (private->pid == name)
			return private;
	}

	return NULL;
}

/* sharedmem / memory sysfs files */

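/*
 * Show handler shared by all per-process statistics attributes; the
 * attribute name selects which counter from priv->stats is reported.
 */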
static ssize_t
process_show(struct kobject *kobj,
	     struct kobj_attribute *attr,
	     char *buf)
{
	struct kgsl_process_private *priv;
	unsigned int val = 0;

	mutex_lock(&kgsl_driver.process_mutex);
	priv = _get_priv_from_kobj(kobj);

	if (priv == NULL) {
		mutex_unlock(&kgsl_driver.process_mutex);
		return 0;
	}

	if (!strncmp(attr->attr.name, "user", 4))
		val = priv->stats.user;
	if (!strncmp(attr->attr.name, "user_max", 8))
		val = priv->stats.user_max;
	if (!strncmp(attr->attr.name, "mapped", 6))
		val = priv->stats.mapped;
	if (!strncmp(attr->attr.name, "mapped_max", 10))
		val = priv->stats.mapped_max;
	if (!strncmp(attr->attr.name, "flushes", 7))
		val = priv->stats.flushes;

	mutex_unlock(&kgsl_driver.process_mutex);
	return snprintf(buf, PAGE_SIZE, "%u\n", val);
}

#define KGSL_MEMSTAT_ATTR(_name, _show) \
	static struct kobj_attribute attr_##_name = \
	__ATTR(_name, 0444, _show, NULL)

KGSL_MEMSTAT_ATTR(user, process_show);
KGSL_MEMSTAT_ATTR(user_max, process_show);
KGSL_MEMSTAT_ATTR(mapped, process_show);
KGSL_MEMSTAT_ATTR(mapped_max, process_show);
KGSL_MEMSTAT_ATTR(flushes, process_show);

static struct attribute *process_attrs[] = {
	&attr_user.attr,
	&attr_user_max.attr,
	&attr_mapped.attr,
	&attr_mapped_max.attr,
	&attr_flushes.attr,
	NULL
};

static struct attribute_group process_attr_group = {
	.attrs = process_attrs,
};

void
kgsl_process_uninit_sysfs(struct kgsl_process_private *private)
{
	/* Remove the sysfs entry */
	if (private->kobj) {
		sysfs_remove_group(private->kobj, &process_attr_group);
		kobject_put(private->kobj);
	}
}

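/*
 * Create a per-process kobject named after the pid under the driver's
 * process kobject and attach the statistics attribute group to it.
 */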
void
kgsl_process_init_sysfs(struct kgsl_process_private *private)
{
	unsigned char name[16];

	/* Add an entry to the sysfs device */
	snprintf(name, sizeof(name), "%d", private->pid);
	private->kobj = kobject_create_and_add(name, kgsl_driver.prockobj);

	/* sysfs failure isn't fatal, just annoying */
	if (private->kobj != NULL) {
		if (sysfs_create_group(private->kobj, &process_attr_group)) {
			kobject_put(private->kobj);
			private->kobj = NULL;
		}
	}
}

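/*
 * Show handler shared by the driver-wide memory statistics attributes;
 * the attribute name selects the counter from kgsl_driver.stats.
 */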
static int kgsl_drv_memstat_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	unsigned int val = 0;

	if (!strncmp(attr->attr.name, "vmalloc", 7))
		val = kgsl_driver.stats.vmalloc;
	else if (!strncmp(attr->attr.name, "vmalloc_max", 11))
		val = kgsl_driver.stats.vmalloc_max;
	else if (!strncmp(attr->attr.name, "coherent", 8))
		val = kgsl_driver.stats.coherent;
	else if (!strncmp(attr->attr.name, "coherent_max", 12))
		val = kgsl_driver.stats.coherent_max;
	else if (!strncmp(attr->attr.name, "mapped", 6))
		val = kgsl_driver.stats.mapped;
	else if (!strncmp(attr->attr.name, "mapped_max", 10))
		val = kgsl_driver.stats.mapped_max;

	return snprintf(buf, PAGE_SIZE, "%u\n", val);
}

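/*
 * Print the allocation-size histogram: one bucket per allocation order,
 * sixteen buckets in total, space separated on a single line.
 */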
static int kgsl_drv_histogram_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	int len = 0;
	int i;

	for (i = 0; i < 16; i++)
		len += snprintf(buf + len, PAGE_SIZE - len, "%d ",
			kgsl_driver.stats.histogram[i]);

	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	return len;
}

DEVICE_ATTR(vmalloc, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(vmalloc_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(coherent, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(coherent_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(mapped, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(mapped_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(histogram, 0444, kgsl_drv_histogram_show, NULL);

static const struct device_attribute *drv_attr_list[] = {
	&dev_attr_vmalloc,
	&dev_attr_vmalloc_max,
	&dev_attr_coherent,
	&dev_attr_coherent_max,
	&dev_attr_mapped,
	&dev_attr_mapped_max,
	&dev_attr_histogram,
	NULL
};

void
kgsl_sharedmem_uninit_sysfs(void)
{
	kgsl_remove_device_sysfs_files(&kgsl_driver.virtdev, drv_attr_list);
}

int
kgsl_sharedmem_init_sysfs(void)
{
	return kgsl_create_device_sysfs_files(&kgsl_driver.virtdev,
		drv_attr_list);
}

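/*
 * Outer (typically L2) cache maintenance, applied per scatterlist
 * segment. When CONFIG_OUTER_CACHE is not set this reduces to an empty
 * stub.
 */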
#ifdef CONFIG_OUTER_CACHE
static void _outer_cache_range_op(int op, unsigned long addr, size_t size)
{
	switch (op) {
	case KGSL_CACHE_OP_FLUSH:
		outer_flush_range(addr, addr + size);
		break;
	case KGSL_CACHE_OP_CLEAN:
		outer_clean_range(addr, addr + size);
		break;
	case KGSL_CACHE_OP_INV:
		outer_inv_range(addr, addr + size);
		break;
	}
}

static void outer_cache_range_op_sg(struct scatterlist *sg, int sglen, int op)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, sglen, i) {
		unsigned int paddr = sg_phys(s);
		_outer_cache_range_op(op, paddr, s->length);
	}
}

#else
static void outer_cache_range_op_sg(struct scatterlist *sg, int sglen, int op)
{
}
#endif

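/*
 * Fault handler for user mappings of vmalloc-backed buffers: translate
 * the faulting offset into the backing vmalloc page and hand that page
 * back to the VM with an extra reference held.
 */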
static int kgsl_vmalloc_vmfault(struct kgsl_memdesc *memdesc,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	unsigned long offset, pg;
	struct page *page;

	offset = (unsigned long) vmf->virtual_address - vma->vm_start;
	pg = (unsigned long) memdesc->hostptr + offset;

	page = vmalloc_to_page((void *) pg);
	if (page == NULL)
		return VM_FAULT_SIGBUS;

	get_page(page);

	vmf->page = page;
	return 0;
}

static int kgsl_vmalloc_vmflags(struct kgsl_memdesc *memdesc)
{
	return VM_RESERVED | VM_DONTEXPAND;
}

static void kgsl_vmalloc_free(struct kgsl_memdesc *memdesc)
{
	kgsl_driver.stats.vmalloc -= memdesc->size;
	vfree(memdesc->hostptr);
}

static int kgsl_contiguous_vmflags(struct kgsl_memdesc *memdesc)
{
	return VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
}

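/*
 * Fault handler for user mappings of physically contiguous buffers:
 * insert the PFN for the faulting offset directly, so no struct page is
 * handed to the VM.
 */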
static int kgsl_contiguous_vmfault(struct kgsl_memdesc *memdesc,
				   struct vm_area_struct *vma,
				   struct vm_fault *vmf)
{
	unsigned long offset, pfn;
	int ret;

	offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	pfn = (memdesc->physaddr >> PAGE_SHIFT) + offset;
	ret = vm_insert_pfn(vma, (unsigned long) vmf->virtual_address, pfn);

	if (ret == -ENOMEM || ret == -EAGAIN)
		return VM_FAULT_OOM;
	else if (ret == -EFAULT)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static void kgsl_ebimem_free(struct kgsl_memdesc *memdesc)
{
	kgsl_driver.stats.coherent -= memdesc->size;
	if (memdesc->hostptr)
		iounmap(memdesc->hostptr);

	free_contiguous_memory_by_paddr(memdesc->physaddr);
}

static void kgsl_coherent_free(struct kgsl_memdesc *memdesc)
{
	kgsl_driver.stats.coherent -= memdesc->size;
	dma_free_coherent(NULL, memdesc->size,
			  memdesc->hostptr, memdesc->physaddr);
}

/* Global - also used by kgsl_drm.c */
struct kgsl_memdesc_ops kgsl_vmalloc_ops = {
	.free = kgsl_vmalloc_free,
	.vmflags = kgsl_vmalloc_vmflags,
	.vmfault = kgsl_vmalloc_vmfault,
};
EXPORT_SYMBOL(kgsl_vmalloc_ops);

static struct kgsl_memdesc_ops kgsl_ebimem_ops = {
	.free = kgsl_ebimem_free,
	.vmflags = kgsl_contiguous_vmflags,
	.vmfault = kgsl_contiguous_vmfault,
};

static struct kgsl_memdesc_ops kgsl_coherent_ops = {
	.free = kgsl_coherent_free,
};

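/*
 * Perform the requested cache operation on a memdesc: first on the
 * kernel mapping via the dmac_* helpers, then on the outer cache for
 * every physical segment in the scatterlist.
 */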
void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op)
{
	void *addr = memdesc->hostptr;
	int size = memdesc->size;

	switch (op) {
	case KGSL_CACHE_OP_FLUSH:
		dmac_flush_range(addr, addr + size);
		break;
	case KGSL_CACHE_OP_CLEAN:
		dmac_clean_range(addr, addr + size);
		break;
	case KGSL_CACHE_OP_INV:
		dmac_inv_range(addr, addr + size);
		break;
	}

	outer_cache_range_op_sg(memdesc->sg, memdesc->sglen, op);
}
EXPORT_SYMBOL(kgsl_cache_range_op);

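/*
 * Common backend for the vmalloc allocators: build a scatterlist from
 * the vmalloc pages, invalidate the range, map it into the GPU
 * pagetable and update the statistics. On any failure the partially
 * initialized memdesc is torn down with kgsl_sharedmem_free().
 */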
static int
_kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			void *ptr, size_t size, unsigned int protflags)
{
	int order, ret = 0;
	int sglen = PAGE_ALIGN(size) / PAGE_SIZE;
	int i;

	memdesc->size = size;
	memdesc->pagetable = pagetable;
	memdesc->priv = KGSL_MEMFLAGS_CACHED;
	memdesc->ops = &kgsl_vmalloc_ops;
	memdesc->hostptr = (void *) ptr;

	memdesc->sg = kmalloc(sglen * sizeof(struct scatterlist), GFP_KERNEL);
	if (memdesc->sg == NULL) {
		ret = -ENOMEM;
		goto done;
	}

	memdesc->sglen = sglen;
	sg_init_table(memdesc->sg, sglen);

	for (i = 0; i < memdesc->sglen; i++, ptr += PAGE_SIZE) {
		struct page *page = vmalloc_to_page(ptr);
		if (!page) {
			ret = -EINVAL;
			goto done;
		}
		sg_set_page(&memdesc->sg[i], page, PAGE_SIZE, 0);
	}

	kgsl_cache_range_op(memdesc, KGSL_CACHE_OP_INV);

	ret = kgsl_mmu_map(pagetable, memdesc, protflags);

	if (ret)
		goto done;

	KGSL_STATS_ADD(size, kgsl_driver.stats.vmalloc,
		kgsl_driver.stats.vmalloc_max);

	order = get_order(size);

	if (order < 16)
		kgsl_driver.stats.histogram[order]++;

done:
	if (ret)
		kgsl_sharedmem_free(memdesc);

	return ret;
}

int
kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
		       struct kgsl_pagetable *pagetable, size_t size)
{
	void *ptr;

	BUG_ON(size == 0);

	size = ALIGN(size, PAGE_SIZE * 2);
	ptr = vmalloc(size);

	if (ptr == NULL) {
		KGSL_CORE_ERR("vmalloc(%d) failed\n", size);
		return -ENOMEM;
	}

	return _kgsl_sharedmem_vmalloc(memdesc, pagetable, ptr, size,
		GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
}
EXPORT_SYMBOL(kgsl_sharedmem_vmalloc);

int
kgsl_sharedmem_vmalloc_user(struct kgsl_memdesc *memdesc,
			    struct kgsl_pagetable *pagetable,
			    size_t size, int flags)
{
	void *ptr;
	unsigned int protflags;

	BUG_ON(size == 0);
	ptr = vmalloc_user(size);

	if (ptr == NULL) {
		KGSL_CORE_ERR("vmalloc_user(%d) failed: allocated=%d\n",
			size, kgsl_driver.stats.vmalloc);
		return -ENOMEM;
	}

	protflags = GSL_PT_PAGE_RV;
	if (!(flags & KGSL_MEMFLAGS_GPUREADONLY))
		protflags |= GSL_PT_PAGE_WV;

	return _kgsl_sharedmem_vmalloc(memdesc, pagetable, ptr, size,
		protflags);
}
EXPORT_SYMBOL(kgsl_sharedmem_vmalloc_user);

int
kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size)
{
	int result = 0;

	size = ALIGN(size, PAGE_SIZE);

	memdesc->size = size;
	memdesc->ops = &kgsl_coherent_ops;

	memdesc->hostptr = dma_alloc_coherent(NULL, size, &memdesc->physaddr,
					      GFP_KERNEL);
	if (memdesc->hostptr == NULL) {
		KGSL_CORE_ERR("dma_alloc_coherent(%d) failed\n", size);
		result = -ENOMEM;
		goto err;
	}

	result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);
	if (result)
		goto err;

	/* Record statistics */
	KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
		kgsl_driver.stats.coherent_max);

err:
	if (result)
		kgsl_sharedmem_free(memdesc);

	return result;
}
EXPORT_SYMBOL(kgsl_sharedmem_alloc_coherent);

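/*
 * Tear down a memdesc: unmap it from the GPU pagetable if it was
 * mapped, release the backing memory through its ops->free handler,
 * free the scatterlist and clear the descriptor.
 */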
void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
{
	if (memdesc == NULL || memdesc->size == 0)
		return;

	if (memdesc->gpuaddr)
		kgsl_mmu_unmap(memdesc->pagetable, memdesc);

	if (memdesc->ops && memdesc->ops->free)
		memdesc->ops->free(memdesc);

	kfree(memdesc->sg);

	memset(memdesc, 0, sizeof(*memdesc));
}
EXPORT_SYMBOL(kgsl_sharedmem_free);

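/*
 * Common backend for the EBI allocators: grab physically contiguous EBI
 * memory, build a scatterlist for it via memdesc_sg_phys() and map it
 * into the GPU pagetable. No kernel mapping is created here; callers
 * that need one (kgsl_sharedmem_ebimem) ioremap it afterwards.
 */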
static int
_kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
		       struct kgsl_pagetable *pagetable, size_t size)
{
	int result = 0;

	memdesc->size = size;
	memdesc->pagetable = pagetable;
	memdesc->ops = &kgsl_ebimem_ops;
	memdesc->physaddr = allocate_contiguous_ebi_nomap(size, SZ_8K);

	if (memdesc->physaddr == 0) {
		KGSL_CORE_ERR("allocate_contiguous_ebi_nomap(%d) failed\n",
			size);
		return -ENOMEM;
	}

	result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);

	if (result)
		goto err;

	result = kgsl_mmu_map(pagetable, memdesc,
		GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);

	if (result)
		goto err;

	KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
		kgsl_driver.stats.coherent_max);

err:
	if (result)
		kgsl_sharedmem_free(memdesc);

	return result;
}

int
kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc,
			   struct kgsl_pagetable *pagetable,
			   size_t size, int flags)
{
	size = ALIGN(size, PAGE_SIZE);
	return _kgsl_sharedmem_ebimem(memdesc, pagetable, size);
}
EXPORT_SYMBOL(kgsl_sharedmem_ebimem_user);

int
kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
		      struct kgsl_pagetable *pagetable, size_t size)
{
	int result;

	size = ALIGN(size, 8192);
	result = _kgsl_sharedmem_ebimem(memdesc, pagetable, size);

	if (result)
		return result;

	memdesc->hostptr = ioremap(memdesc->physaddr, size);

	if (memdesc->hostptr == NULL) {
		KGSL_CORE_ERR("ioremap failed\n");
		kgsl_sharedmem_free(memdesc);
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_ebimem);

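/*
 * Bounds-checked accessors for CPU-visible shared memory. Reads and
 * writes use the relaxed MMIO helpers; writes and memsets are also
 * recorded via kgsl_cffdump_setmem() for CFF dumping.
 */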
int
kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
		     uint32_t *dst,
		     unsigned int offsetbytes)
{
	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL || dst == NULL);
	WARN_ON(offsetbytes + sizeof(unsigned int) > memdesc->size);

	if (offsetbytes + sizeof(unsigned int) > memdesc->size)
		return -ERANGE;

	*dst = readl_relaxed(memdesc->hostptr + offsetbytes);
	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_readl);

int
kgsl_sharedmem_writel(const struct kgsl_memdesc *memdesc,
		      unsigned int offsetbytes,
		      uint32_t src)
{
	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
	BUG_ON(offsetbytes + sizeof(unsigned int) > memdesc->size);

	kgsl_cffdump_setmem(memdesc->physaddr + offsetbytes,
		src, sizeof(uint));
	writel_relaxed(src, memdesc->hostptr + offsetbytes);
	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_writel);

int
kgsl_sharedmem_set(const struct kgsl_memdesc *memdesc, unsigned int offsetbytes,
		   unsigned int value, unsigned int sizebytes)
{
	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
	BUG_ON(offsetbytes + sizebytes > memdesc->size);

	kgsl_cffdump_setmem(memdesc->physaddr + offsetbytes, value,
		sizebytes);
	memset(memdesc->hostptr + offsetbytes, value, sizebytes);
	return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_set);