blob: 377331314592f1e46fb280dd91798b3d1037d6fe [file] [log] [blame]
Mitchel Humpherys065497f2012-08-29 16:20:15 -07001/*
2 * Copyright (c) 2012, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
Mitchel Humpherysbffc5792013-02-06 12:03:20 -080014#include "adsprpc_shared.h"
15
16#ifdef __KERNEL__
17
18#include <linux/slab.h>
19#include <linux/completion.h>
20#include <linux/pagemap.h>
21#include <linux/mm.h>
22#include <linux/fs.h>
23#include <linux/sched.h>
24#include <linux/module.h>
25#include <linux/cdev.h>
26#include <linux/list.h>
27#include <linux/hash.h>
28#include <linux/msm_ion.h>
29#include <mach/msm_smd.h>
30#include <mach/ion.h>
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -070031#include <linux/scatterlist.h>
Mitchel Humpherysbffc5792013-02-06 12:03:20 -080032#include <linux/fs.h>
33#include <linux/uaccess.h>
34#include <linux/device.h>
35
/* Ion heap used for all driver allocations; overridable at build time,
 * defaults to the audio heap. */
#ifndef ION_ADSPRPC_HEAP_ID
#define ION_ADSPRPC_HEAP_ID ION_AUDIO_HEAP_ID
#endif /*ION_ADSPRPC_HEAP_ID*/

#define RPC_TIMEOUT (5 * HZ)	/* wait bound for SMD channel open */
#define RPC_HASH_BITS 5
#define RPC_HASH_SZ (1 << RPC_HASH_BITS)	/* buckets in the device hash */
#define BALIGN 32	/* byte alignment for marshalled argument data */

/* Hold mmap_sem (read) around user-page walks; kernel-originated
 * invocations have no user mm to lock, so they skip it. */
#define LOCK_MMAP(kernel)\
		do {\
			if (!kernel)\
				down_read(&current->mm->mmap_sem);\
		} while (0)

#define UNLOCK_MMAP(kernel)\
		do {\
			if (!kernel)\
				up_read(&current->mm->mmap_sem);\
		} while (0)
57
58static inline uint32_t buf_page_start(void *buf)
59{
60 uint32_t start = (uint32_t) buf & PAGE_MASK;
61 return start;
62}
63
64static inline uint32_t buf_page_offset(void *buf)
65{
66 uint32_t offset = (uint32_t) buf & (PAGE_SIZE - 1);
67 return offset;
68}
69
70static inline int buf_num_pages(void *buf, int len)
71{
72 uint32_t start = buf_page_start(buf) >> PAGE_SHIFT;
73 uint32_t end = (((uint32_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
74 int nPages = end - start + 1;
75 return nPages;
76}
77
78static inline uint32_t buf_page_size(uint32_t size)
79{
80 uint32_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
81 return sz > PAGE_SIZE ? sz : PAGE_SIZE;
82}
83
/*
 * Resolve a user buffer into a single physical page entry for the DSP.
 * Checks user access rights, locates the VMA, bounds-checks the buffer
 * against it, then resolves the pfn of the first page.  Only ONE entry
 * is written, covering the whole nr_pages span — this assumes the
 * mapping is physically contiguous; TODO confirm with callers/hardware.
 * Returns the number of entries written (1 on success), 0 if the pfn
 * lookup or nr_elems check fails, or -1 for earlier validation failures.
 * Caller must hold mmap_sem (see LOCK_MMAP at the call sites).
 */
static inline int buf_get_pages(void *addr, int sz, int nr_pages, int access,
			struct smq_phy_page *pages, int nr_elems)
{
	struct vm_area_struct *vma;
	uint32_t start = buf_page_start(addr);
	uint32_t len = nr_pages << PAGE_SHIFT;
	unsigned long pfn;
	int n = -1, err = 0;

	/* Write access is required only for out-buffers. */
	VERIFY(err, 0 != access_ok(access ? VERIFY_WRITE : VERIFY_READ,
					(void __user *)start, len));
	if (err)
		goto bail;
	VERIFY(err, 0 != (vma = find_vma(current->mm, start)));
	if (err)
		goto bail;
	/* The entire [addr, addr+sz) range must fit in one VMA. */
	VERIFY(err, ((uint32_t)addr + sz) <= vma->vm_end);
	if (err)
		goto bail;
	n = 0;	/* from here on, failure means "no entries", not "error" */
	VERIFY(err, 0 == follow_pfn(vma, start, &pfn));
	if (err)
		goto bail;
	VERIFY(err, nr_elems > 0);
	if (err)
		goto bail;
	pages->addr = __pfn_to_phys(pfn);
	pages->size = len;
	n++;
 bail:
	return n;
}
116
117#endif /*__KERNEL__*/
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700118
/* Per-invocation context shared between the caller and the SMD
 * response path. */
struct smq_invoke_ctx {
	struct completion work;	/* completed when the DSP responds */
	int retval;		/* remote return value, set before complete() */
	atomic_t free;		/* 0 = slot available, 1 = in use */
};

/* Fixed-size pool of invoke contexts, scanned as a ring. */
struct smq_context_list {
	struct smq_invoke_ctx *ls;	/* array of contexts */
	int size;			/* number of elements in ls */
	int last;			/* hint: index of last allocation */
};

/* Global driver state; single instance gfa below. */
struct fastrpc_apps {
	smd_channel_t *chan;		/* SMD channel to the aDSP */
	struct smq_context_list clst;
	struct completion work;		/* signalled on SMD_EVENT_OPEN */
	struct ion_client *iclient;
	struct cdev cdev;
	dev_t dev_no;
	spinlock_t wrlock;		/* serializes smd_write on chan */
	spinlock_t hlock;		/* protects htbl */
	struct hlist_head htbl[RPC_HASH_SZ];	/* devices hashed by tgid */
};

/* An ion allocation mapped into the kernel and visible to the DSP. */
struct fastrpc_buf {
	struct ion_handle *handle;
	void *virt;		/* kernel mapping; 0 when unmapped */
	ion_phys_addr_t phys;
	int size;
	int used;		/* bytes consumed at the front of the buffer */
};

/* Per-process scratch device, cached in fastrpc_apps.htbl. */
struct fastrpc_device {
	uint32_t tgid;		/* owning thread-group id */
	struct hlist_node hn;
	struct fastrpc_buf buf;
};

static struct fastrpc_apps gfa;
158
159static void free_mem(struct fastrpc_buf *buf)
160{
161 struct fastrpc_apps *me = &gfa;
162
163 if (buf->handle) {
164 if (buf->virt) {
165 ion_unmap_kernel(me->iclient, buf->handle);
166 buf->virt = 0;
167 }
168 ion_free(me->iclient, buf->handle);
169 buf->handle = 0;
170 }
171}
172
173static int alloc_mem(struct fastrpc_buf *buf)
174{
175 struct ion_client *clnt = gfa.iclient;
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700176 struct sg_table *sg;
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700177 int err = 0;
178
179 buf->handle = ion_alloc(clnt, buf->size, SZ_4K,
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700180 ION_HEAP(ION_AUDIO_HEAP_ID), 0);
181 VERIFY(err, 0 == IS_ERR_OR_NULL(buf->handle));
182 if (err)
183 goto bail;
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700184 buf->virt = 0;
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700185 VERIFY(err, 0 != (buf->virt = ion_map_kernel(clnt, buf->handle)));
186 if (err)
187 goto bail;
188 VERIFY(err, 0 != (sg = ion_sg_table(clnt, buf->handle)));
189 if (err)
190 goto bail;
191 VERIFY(err, 1 == sg->nents);
192 if (err)
193 goto bail;
194 buf->phys = sg_dma_address(sg->sgl);
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700195 bail:
196 if (err && !IS_ERR_OR_NULL(buf->handle))
197 free_mem(buf);
198 return err;
199}
200
201static int context_list_ctor(struct smq_context_list *me, int size)
202{
203 int err = 0;
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700204 VERIFY(err, 0 != (me->ls = kzalloc(size, GFP_KERNEL)));
205 if (err)
206 goto bail;
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700207 me->size = size / sizeof(*me->ls);
208 me->last = 0;
209 bail:
210 return err;
211}
212
213static void context_list_dtor(struct smq_context_list *me)
214{
215 kfree(me->ls);
216 me->ls = 0;
217}
218
219static void context_list_alloc_ctx(struct smq_context_list *me,
220 struct smq_invoke_ctx **po)
221{
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700222 int i = me->last;
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700223 struct smq_invoke_ctx *ctx;
224
225 for (;;) {
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700226 i = i % me->size;
227 ctx = &me->ls[i];
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700228 if (atomic_read(&ctx->free) == 0)
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700229 if (atomic_cmpxchg(&ctx->free, 0, 1) == 0)
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700230 break;
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700231 i++;
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700232 }
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700233 me->last = i;
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700234 ctx->retval = -1;
235 init_completion(&ctx->work);
236 *po = ctx;
237}
238
239static void context_free(struct smq_invoke_ctx *me)
240{
241 if (me)
242 atomic_set(&me->free, 0);
243}
244
/* Publish the remote return value and wake the waiting invoker.
 * retval must be stored before complete() so the waiter sees it. */
static void context_notify_user(struct smq_invoke_ctx *me, int retval)
{
	me->retval = retval;
	complete(&me->work);
}
250
251static void context_notify_all_users(struct smq_context_list *me)
252{
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700253 int i;
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700254
255 if (!me->ls)
256 return;
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700257 for (i = 0; i < me->size; ++i) {
258 if (atomic_read(&me->ls[i].free) != 0)
259 complete(&me->ls[i].work);
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700260 }
261}
262
/*
 * Build the invoke-buffer descriptor list and physical page table for
 * an invocation inside obuf.  Starts by aliasing ibuf (the per-process
 * scratch page) and, whenever the tables outgrow the buffer, allocates
 * a larger obuf and restarts from scratch (the `retry` loop).
 * On success obuf->used records how much of obuf the tables consume.
 * Returns 0 on success; on error any obuf distinct from ibuf is freed.
 */
static int get_page_list(uint32_t kernel, uint32_t sc, remote_arg_t *pra,
			struct fastrpc_buf *ibuf, struct fastrpc_buf *obuf)
{
	struct smq_phy_page *pgstart, *pages;
	struct smq_invoke_buf *list;
	int i, rlen, err = 0;
	int inbufs = REMOTE_SCALARS_INBUFS(sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);

	/* User-mode callers need mmap_sem held for buf_get_pages. */
	LOCK_MMAP(kernel);
	*obuf = *ibuf;
 retry:
	list = smq_invoke_buf_start((remote_arg_t *)obuf->virt, sc);
	pgstart = smq_phy_page_start(sc, list);
	pages = pgstart + 1;	/* entry 0 describes obuf itself */
	rlen = obuf->size - ((uint32_t)pages - (uint32_t)obuf->virt);
	if (rlen < 0) {
		/* Headers alone overflow obuf: grow it and restart. */
		rlen = ((uint32_t)pages - (uint32_t)obuf->virt) - obuf->size;
		obuf->size += buf_page_size(rlen);
		obuf->handle = 0;
		VERIFY(err, 0 == alloc_mem(obuf));
		if (err)
			goto bail;
		goto retry;
	}
	pgstart->addr = obuf->phys;
	pgstart->size = obuf->size;
	for (i = 0; i < inbufs + outbufs; ++i) {
		void *buf;
		int len, num;

		list[i].num = 0;
		list[i].pgidx = 0;
		len = pra[i].buf.len;
		if (!len)
			continue;
		buf = pra[i].buf.pv;
		num = buf_num_pages(buf, len);
		/* Kernel buffers are never pinned here (num forced to 0);
		 * they are copied into obuf later by get_args. */
		if (!kernel)
			list[i].num = buf_get_pages(buf, len, num,
				i >= inbufs, pages, rlen / sizeof(*pages));
		else
			list[i].num = 0;
		VERIFY(err, list[i].num >= 0);
		if (err)
			goto bail;
		if (list[i].num) {
			/* Buffer mapped in place: record its page entries. */
			list[i].pgidx = pages - pgstart;
			pages = pages + list[i].num;
		} else if (rlen > sizeof(*pages)) {
			/* Reserve one entry for a copied buffer. */
			list[i].pgidx = pages - pgstart;
			pages = pages + 1;
		} else {
			/* Out of room for page entries: grow and restart. */
			if (obuf->handle != ibuf->handle)
				free_mem(obuf);
			obuf->size += buf_page_size(sizeof(*pages));
			obuf->handle = 0;
			VERIFY(err, 0 == alloc_mem(obuf));
			if (err)
				goto bail;
			goto retry;
		}
		rlen = obuf->size - ((uint32_t) pages - (uint32_t) obuf->virt);
	}
	obuf->used = obuf->size - rlen;
 bail:
	if (err && (obuf->handle != ibuf->handle))
		free_mem(obuf);
	UNLOCK_MMAP(kernel);
	return err;
}
334
/*
 * Marshal the actual argument data for an invocation.  Buffers that
 * get_page_list mapped in place (list[i].num != 0) are passed by
 * reference; all others are copied into obuf (or into extra buffers
 * allocated here and returned via *abufs/*nbufs — caller frees them).
 * In-handles are copied after the buffers.  Caches are flushed so the
 * DSP sees the data.  Returns 0 on success.
 */
static int get_args(uint32_t kernel, uint32_t sc, remote_arg_t *pra,
			remote_arg_t *rpra, remote_arg_t *upra,
			struct fastrpc_buf *ibuf, struct fastrpc_buf **abufs,
			int *nbufs)
{
	struct smq_invoke_buf *list;
	struct fastrpc_buf *pbuf = ibuf, *obufs = 0;
	struct smq_phy_page *pages;
	void *args;
	int i, rlen, size, used, inh, bufs = 0, err = 0;
	int inbufs = REMOTE_SCALARS_INBUFS(sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);

	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	/* Argument data starts after the tables, BALIGN-aligned. */
	used = ALIGN(pbuf->used, BALIGN);
	args = (void *)((char *)pbuf->virt + used);
	rlen = pbuf->size - used;
	for (i = 0; i < inbufs + outbufs; ++i) {
		int num;

		rpra[i].buf.len = pra[i].buf.len;
		if (!rpra[i].buf.len)
			continue;
		if (list[i].num) {
			/* Mapped in place: pass the user pointer through. */
			rpra[i].buf.pv = pra[i].buf.pv;
			continue;
		}
		if (rlen < pra[i].buf.len) {
			/* Current buffer is full: grow the overflow array
			 * and allocate a fresh buffer for this argument. */
			struct fastrpc_buf *b;
			pbuf->used = pbuf->size - rlen;
			VERIFY(err, 0 != (b = krealloc(obufs,
				(bufs + 1) * sizeof(*obufs), GFP_KERNEL)));
			if (err)
				goto bail;
			obufs = b;
			pbuf = obufs + bufs;
			pbuf->size = buf_num_pages(0, pra[i].buf.len) *
								PAGE_SIZE;
			VERIFY(err, 0 == alloc_mem(pbuf));
			if (err)
				goto bail;
			bufs++;
			args = pbuf->virt;
			rlen = pbuf->size;
		}
		num = buf_num_pages(args, pra[i].buf.len);
		if (pbuf == ibuf) {
			/* Copied into obuf: covered by page entry 0. */
			list[i].num = num;
			list[i].pgidx = 0;
		} else {
			/* Copied into an overflow buffer: fill the page
			 * entry reserved by get_page_list. */
			list[i].num = 1;
			pages[list[i].pgidx].addr =
				buf_page_start((void *)(pbuf->phys +
						 (pbuf->size - rlen)));
			pages[list[i].pgidx].size =
				buf_page_size(pra[i].buf.len);
		}
		if (i < inbufs) {
			/* Only in-buffers carry data to the DSP. */
			if (!kernel) {
				VERIFY(err, 0 == copy_from_user(args,
						pra[i].buf.pv, pra[i].buf.len));
				if (err)
					goto bail;
			} else {
				memmove(args, pra[i].buf.pv, pra[i].buf.len);
			}
		}
		rpra[i].buf.pv = args;
		args = (void *)((char *)args + ALIGN(pra[i].buf.len, BALIGN));
		rlen -= ALIGN(pra[i].buf.len, BALIGN);
	}
	/* Push copied in-buffer contents out to memory for the DSP. */
	for (i = 0; i < inbufs; ++i) {
		if (rpra[i].buf.len)
			dmac_flush_range(rpra[i].buf.pv,
				(char *)rpra[i].buf.pv + rpra[i].buf.len);
	}
	pbuf->used = pbuf->size - rlen;
	size = sizeof(*rpra) * REMOTE_SCALARS_INHANDLES(sc);
	if (size) {
		/* In-handles sit right after the buffer args in upra. */
		inh = inbufs + outbufs;
		if (!kernel) {
			VERIFY(err, 0 == copy_from_user(&rpra[inh], &upra[inh],
							size));
			if (err)
				goto bail;
		} else {
			memmove(&rpra[inh], &upra[inh], size);
		}
	}
	/* Flush the descriptor tables themselves. */
	dmac_flush_range(rpra, (char *)rpra + used);
 bail:
	/* Always report overflow buffers so the caller can free them. */
	*abufs = obufs;
	*nbufs = bufs;
	return err;
}
431
432static int put_args(uint32_t kernel, uint32_t sc, remote_arg_t *pra,
433 remote_arg_t *rpra, remote_arg_t *upra)
434{
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700435 int i, inbufs, outbufs, outh, size;
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700436 int err = 0;
437
438 inbufs = REMOTE_SCALARS_INBUFS(sc);
439 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700440 for (i = inbufs; i < inbufs + outbufs; ++i) {
441 if (rpra[i].buf.pv != pra[i].buf.pv) {
442 VERIFY(err, 0 == copy_to_user(pra[i].buf.pv,
443 rpra[i].buf.pv, rpra[i].buf.len));
444 if (err)
445 goto bail;
446 }
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700447 }
448 size = sizeof(*rpra) * REMOTE_SCALARS_OUTHANDLES(sc);
449 if (size) {
450 outh = inbufs + outbufs + REMOTE_SCALARS_INHANDLES(sc);
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700451 if (!kernel) {
452 VERIFY(err, 0 == copy_to_user(&upra[outh], &rpra[outh],
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700453 size));
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700454 if (err)
455 goto bail;
456 } else {
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700457 memmove(&upra[outh], &rpra[outh], size);
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700458 }
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700459 }
460 bail:
461 return err;
462}
463
464static void inv_args(uint32_t sc, remote_arg_t *rpra, int used)
465{
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700466 int i, inbufs, outbufs;
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700467 int inv = 0;
468
469 inbufs = REMOTE_SCALARS_INBUFS(sc);
470 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700471 for (i = inbufs; i < inbufs + outbufs; ++i) {
472 if (buf_page_start(rpra) == buf_page_start(rpra[i].buf.pv))
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700473 inv = 1;
Mitchel Humpherys6873f4b2012-10-19 11:29:36 -0700474 else if (rpra[i].buf.len)
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700475 dmac_inv_range(rpra[i].buf.pv,
476 (char *)rpra[i].buf.pv + rpra[i].buf.len);
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700477 }
478
479 if (inv || REMOTE_SCALARS_OUTHANDLES(sc))
480 dmac_inv_range(rpra, (char *)rpra + used);
481}
482
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700483static int fastrpc_invoke_send(struct fastrpc_apps *me, uint32_t handle,
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700484 uint32_t sc, struct smq_invoke_ctx *ctx,
485 struct fastrpc_buf *buf)
486{
487 struct smq_msg msg;
488 int err = 0, len;
489
490 msg.pid = current->tgid;
491 msg.tid = current->pid;
492 msg.invoke.header.ctx = ctx;
493 msg.invoke.header.handle = handle;
494 msg.invoke.header.sc = sc;
495 msg.invoke.page.addr = buf->phys;
496 msg.invoke.page.size = buf_page_size(buf->used);
497 spin_lock(&me->wrlock);
498 len = smd_write(me->chan, &msg, sizeof(msg));
499 spin_unlock(&me->wrlock);
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700500 VERIFY(err, len == sizeof(msg));
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700501 return err;
502}
503
504static void fastrpc_deinit(void)
505{
506 struct fastrpc_apps *me = &gfa;
507
508 if (me->chan)
509 (void)smd_close(me->chan);
510 context_list_dtor(&me->clst);
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700511 if (me->iclient)
512 ion_client_destroy(me->iclient);
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700513 me->iclient = 0;
514 me->chan = 0;
515}
516
/*
 * Drain response messages from the SMD channel (runs from the SMD
 * callback on SMD_EVENT_DATA) and complete the matching contexts.
 * Stops at the first short/empty read.
 * NOTE(review): rsp.ctx arrives from the remote processor and is
 * dereferenced directly as a kernel pointer in context_notify_user —
 * a misbehaving DSP could supply an arbitrary pointer; consider
 * validating it against the context pool. TODO confirm threat model.
 */
static void fastrpc_read_handler(void)
{
	struct fastrpc_apps *me = &gfa;
	struct smq_invoke_rsp rsp;
	int err = 0;

	do {
		VERIFY(err, sizeof(rsp) ==
				smd_read_from_cb(me->chan, &rsp, sizeof(rsp)));
		if (err)
			goto bail;
		context_notify_user(rsp.ctx, rsp.retval);
	} while (!err);
 bail:
	return;
}
533
534static void smd_event_handler(void *priv, unsigned event)
535{
536 struct fastrpc_apps *me = (struct fastrpc_apps *)priv;
537
538 switch (event) {
539 case SMD_EVENT_OPEN:
540 complete(&(me->work));
541 break;
542 case SMD_EVENT_CLOSE:
543 context_notify_all_users(&me->clst);
544 break;
545 case SMD_EVENT_DATA:
546 fastrpc_read_handler();
547 break;
548 }
549}
550
/*
 * One-time driver initialization (idempotent: a non-zero chan means
 * init already ran).  Sets up locks, the context pool, the ion client,
 * then opens the SMD channel to the aDSP and waits for it to come up.
 * Returns 0 on success; on failure everything is torn down again.
 */
static int fastrpc_init(void)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;

	if (me->chan == 0) {
		int i;
		spin_lock_init(&me->hlock);
		spin_lock_init(&me->wrlock);
		init_completion(&me->work);
		for (i = 0; i < RPC_HASH_SZ; ++i)
			INIT_HLIST_HEAD(&me->htbl[i]);
		VERIFY(err, 0 == context_list_ctor(&me->clst, SZ_4K));
		if (err)
			goto bail;
		me->iclient = msm_ion_client_create(ION_HEAP_CARVEOUT_MASK,
							DEVICE_NAME);
		VERIFY(err, 0 == IS_ERR_OR_NULL(me->iclient));
		if (err)
			goto bail;
		VERIFY(err, 0 == smd_named_open_on_edge(FASTRPC_SMD_GUID,
						SMD_APPS_QDSP, &me->chan,
						me, smd_event_handler));
		if (err)
			goto bail;
		/* smd_event_handler completes me->work on SMD_EVENT_OPEN. */
		VERIFY(err, 0 != wait_for_completion_timeout(&me->work,
							RPC_TIMEOUT));
		if (err)
			goto bail;
	}
 bail:
	if (err)
		fastrpc_deinit();
	return err;
}
586
587static void free_dev(struct fastrpc_device *dev)
588{
589 if (dev) {
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700590 free_mem(&dev->buf);
591 kfree(dev);
Mitchel Humpherysbffc5792013-02-06 12:03:20 -0800592 module_put(THIS_MODULE);
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700593 }
594}
595
596static int alloc_dev(struct fastrpc_device **dev)
597{
598 int err = 0;
599 struct fastrpc_device *fd = 0;
600
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700601 VERIFY(err, 0 != try_module_get(THIS_MODULE));
602 if (err)
603 goto bail;
604 VERIFY(err, 0 != (fd = kzalloc(sizeof(*fd), GFP_KERNEL)));
605 if (err)
606 goto bail;
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700607 fd->buf.size = PAGE_SIZE;
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700608 VERIFY(err, 0 == alloc_mem(&fd->buf));
609 if (err)
610 goto bail;
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700611 fd->tgid = current->tgid;
612 INIT_HLIST_NODE(&fd->hn);
613 *dev = fd;
614 bail:
615 if (err)
616 free_dev(fd);
617 return err;
618}
619
620static int get_dev(struct fastrpc_apps *me, struct fastrpc_device **rdev)
621{
622 struct hlist_head *head;
623 struct fastrpc_device *dev = 0;
624 struct hlist_node *n;
625 uint32_t h = hash_32(current->tgid, RPC_HASH_BITS);
626 int err = 0;
627
628 spin_lock(&me->hlock);
629 head = &me->htbl[h];
630 hlist_for_each_entry(dev, n, head, hn) {
631 if (dev->tgid == current->tgid) {
632 hlist_del(&dev->hn);
633 break;
634 }
635 }
636 spin_unlock(&me->hlock);
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700637 VERIFY(err, dev != 0);
638 if (err)
639 goto bail;
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700640 *rdev = dev;
641 bail:
642 if (err) {
643 free_dev(dev);
644 err = alloc_dev(rdev);
645 }
646 return err;
647}
648
649static void add_dev(struct fastrpc_apps *me, struct fastrpc_device *dev)
650{
651 struct hlist_head *head;
652 uint32_t h = hash_32(current->tgid, RPC_HASH_BITS);
653
654 spin_lock(&me->hlock);
655 head = &me->htbl[h];
656 hlist_add_head(&dev->hn, head);
657 spin_unlock(&me->hlock);
658 return;
659}
660
661static int fastrpc_release_current_dsp_process(void);
662
/*
 * Core invoke path shared by the ioctl (kernel == 0) and in-kernel
 * callers (kernel == 1).  Marshals arguments, sends the message,
 * waits for the DSP response, and unmarshals results.
 * If the wait is interrupted, the remote process is torn down (user
 * callers only) and we still wait for the completion so the context's
 * memory is not recycled while the DSP may touch it.
 */
static int fastrpc_internal_invoke(struct fastrpc_apps *me, uint32_t kernel,
			struct fastrpc_ioctl_invoke *invoke, remote_arg_t *pra)
{
	remote_arg_t *rpra = 0;
	struct fastrpc_device *dev = 0;
	struct smq_invoke_ctx *ctx = 0;
	struct fastrpc_buf obuf, *abufs = 0, *b;
	int interrupted = 0;
	uint32_t sc;
	int i, nbufs = 0, err = 0;

	sc = invoke->sc;
	obuf.handle = 0;
	if (REMOTE_SCALARS_LENGTH(sc)) {
		/* Grab (or create) this process's scratch device and
		 * marshal page tables + argument data into obuf. */
		VERIFY(err, 0 == get_dev(me, &dev));
		if (err)
			goto bail;
		VERIFY(err, 0 == get_page_list(kernel, sc, pra, &dev->buf,
						&obuf));
		if (err)
			goto bail;
		rpra = (remote_arg_t *)obuf.virt;
		VERIFY(err, 0 == get_args(kernel, sc, pra, rpra, invoke->pra,
					&obuf, &abufs, &nbufs));
		if (err)
			goto bail;
	}

	context_list_alloc_ctx(&me->clst, &ctx);
	VERIFY(err, 0 == fastrpc_invoke_send(me, invoke->handle, sc, ctx,
						&obuf));
	if (err)
		goto bail;
	inv_args(sc, rpra, obuf.used);
	VERIFY(err, 0 == (interrupted =
			wait_for_completion_interruptible(&ctx->work)));
	if (err)
		goto bail;
	/* Remote return value becomes our error code. */
	VERIFY(err, 0 == (err = ctx->retval));
	if (err)
		goto bail;
	VERIFY(err, 0 == put_args(kernel, sc, pra, rpra, invoke->pra));
	if (err)
		goto bail;
 bail:
	if (interrupted) {
		/* The DSP may still write to our buffers: kill the remote
		 * process (user callers) and wait out the completion. */
		if (!kernel)
			(void)fastrpc_release_current_dsp_process();
		wait_for_completion(&ctx->work);
	}
	context_free(ctx);

	for (i = 0, b = abufs; i < nbufs; ++i, ++b)
		free_mem(b);

	kfree(abufs);
	if (dev) {
		/* Cache the device again; free obuf only if it grew past
		 * the device's own scratch buffer. */
		add_dev(me, dev);
		if (obuf.handle != dev->buf.handle)
			free_mem(&obuf);
	}
	return err;
}
726
727static int fastrpc_create_current_dsp_process(void)
728{
729 int err = 0;
730 struct fastrpc_ioctl_invoke ioctl;
731 struct fastrpc_apps *me = &gfa;
732 remote_arg_t ra[1];
733 int tgid = 0;
734
735 tgid = current->tgid;
736 ra[0].buf.pv = &tgid;
737 ra[0].buf.len = sizeof(tgid);
738 ioctl.handle = 1;
739 ioctl.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
740 ioctl.pra = ra;
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700741 VERIFY(err, 0 == fastrpc_internal_invoke(me, 1, &ioctl, ra));
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700742 return err;
743}
744
745static int fastrpc_release_current_dsp_process(void)
746{
747 int err = 0;
748 struct fastrpc_apps *me = &gfa;
749 struct fastrpc_ioctl_invoke ioctl;
750 remote_arg_t ra[1];
751 int tgid = 0;
752
753 tgid = current->tgid;
754 ra[0].buf.pv = &tgid;
755 ra[0].buf.len = sizeof(tgid);
756 ioctl.handle = 1;
757 ioctl.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
758 ioctl.pra = ra;
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700759 VERIFY(err, 0 == fastrpc_internal_invoke(me, 1, &ioctl, ra));
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700760 return err;
761}
762
763static void cleanup_current_dev(void)
764{
765 struct fastrpc_apps *me = &gfa;
766 uint32_t h = hash_32(current->tgid, RPC_HASH_BITS);
767 struct hlist_head *head;
768 struct hlist_node *pos;
769 struct fastrpc_device *dev;
770
771 rnext:
772 dev = 0;
773 spin_lock(&me->hlock);
774 head = &me->htbl[h];
775 hlist_for_each_entry(dev, pos, head, hn) {
776 if (dev->tgid == current->tgid) {
777 hlist_del(&dev->hn);
778 break;
779 }
780 }
781 spin_unlock(&me->hlock);
782 if (dev) {
783 free_dev(dev);
784 goto rnext;
785 }
786 return;
787}
788
/* File release: tell the DSP to drop this process (best effort — the
 * return value is deliberately ignored) and free cached devices. */
static int fastrpc_device_release(struct inode *inode, struct file *file)
{
	(void)fastrpc_release_current_dsp_process();
	cleanup_current_dev();
	return 0;
}
795
796static int fastrpc_device_open(struct inode *inode, struct file *filp)
797{
798 int err = 0;
799
800 if (0 != try_module_get(THIS_MODULE)) {
801 /* This call will cause a dev to be created
802 * which will addref this module
803 */
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700804 VERIFY(err, 0 == fastrpc_create_current_dsp_process());
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700805 if (err)
806 cleanup_current_dev();
807 module_put(THIS_MODULE);
808 }
809 return err;
810}
811
812
/*
 * ioctl entry point.  FASTRPC_IOCTL_INVOKE copies the invoke
 * descriptor and its remote_arg array from user space, then runs the
 * invocation with kernel == 0.  Unknown ioctls return -ENOTTY.
 */
static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
				 unsigned long ioctl_param)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_ioctl_invoke invoke;
	remote_arg_t *pra = 0;
	void *param = (char *)ioctl_param;
	int bufs, err = 0;

	switch (ioctl_num) {
	case FASTRPC_IOCTL_INVOKE:
		VERIFY(err, 0 == copy_from_user(&invoke, param,
						sizeof(invoke)));
		if (err)
			goto bail;
		bufs = REMOTE_SCALARS_INBUFS(invoke.sc) +
			REMOTE_SCALARS_OUTBUFS(invoke.sc);
		if (bufs) {
			bufs = bufs * sizeof(*pra);
			VERIFY(err, 0 != (pra = kmalloc(bufs, GFP_KERNEL)));
			if (err)
				goto bail;
		}
		/* With bufs == 0 this is a zero-length copy on a NULL
		 * pra, which copy_from_user treats as a no-op. */
		VERIFY(err, 0 == copy_from_user(pra, invoke.pra, bufs));
		if (err)
			goto bail;
		/* Propagate the remote return value as the ioctl result. */
		VERIFY(err, 0 == (err = fastrpc_internal_invoke(me, 0, &invoke,
								pra)));
		if (err)
			goto bail;
		break;
	default:
		err = -ENOTTY;
		break;
	}
 bail:
	kfree(pra);
	return err;
}
852
Mitchel Humpherysbffc5792013-02-06 12:03:20 -0800853#ifdef __KERNEL__
854
/* Character-device entry points (no read/write; all traffic via ioctl). */
static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
};
860
Mitchel Humpherysbffc5792013-02-06 12:03:20 -0800861#endif /*__KERNEL__*/
862
/*
 * Module init: bring up the RPC transport, then register the character
 * device.  On any failure the char-dev region (if allocated) and the
 * transport are torn down again.
 */
static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	int err = 0;

	VERIFY(err, 0 == fastrpc_init());
	if (err)
		goto bail;
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, 1, DEVICE_NAME));
	if (err)
		goto bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0), 1));
	if (err)
		goto bail;
	/* No device node is created here; print the mknod recipe. */
	pr_info("'mknod /dev/%s c %d 0'\n", DEVICE_NAME, MAJOR(me->dev_no));
 bail:
	if (err) {
		/* gfa is static, so dev_no is 0 unless allocated above. */
		if (me->dev_no)
			unregister_chrdev_region(me->dev_no, 1);
		fastrpc_deinit();
	}
	return err;
}
888
/* Module exit: tear down transport and unregister the char device.
 * NOTE(review): the transport is shut down before cdev_del, so an
 * ioctl racing with unload could still enter the driver — confirm
 * whether the ordering should be reversed. */
static void __exit fastrpc_device_exit(void)
{
	struct fastrpc_apps *me = &gfa;

	fastrpc_deinit();
	cdev_del(&me->cdev);
	unregister_chrdev_region(me->dev_no, 1);
}
897
module_init(fastrpc_device_init);
module_exit(fastrpc_device_exit);

/* License tag matches the GPLv2-only header at the top of this file. */
MODULE_LICENSE("GPL v2");