blob: 8e68acd948aab48ca41b51a6b416f2ce3316925a [file] [log] [blame]
Mitchel Humpherys065497f2012-08-29 16:20:15 -07001/*
2 * Copyright (c) 2012, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
Mitchel Humpherysbffc5792013-02-06 12:03:20 -080014#include "adsprpc_shared.h"
15
16#ifdef __KERNEL__
17
18#include <linux/slab.h>
19#include <linux/completion.h>
20#include <linux/pagemap.h>
21#include <linux/mm.h>
22#include <linux/fs.h>
23#include <linux/sched.h>
24#include <linux/module.h>
25#include <linux/cdev.h>
26#include <linux/list.h>
27#include <linux/hash.h>
28#include <linux/msm_ion.h>
29#include <mach/msm_smd.h>
30#include <mach/ion.h>
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -070031#include <linux/scatterlist.h>
Mitchel Humpherysbffc5792013-02-06 12:03:20 -080032#include <linux/fs.h>
33#include <linux/uaccess.h>
34#include <linux/device.h>
35
36#ifndef ION_ADSPRPC_HEAP_ID
37#define ION_ADSPRPC_HEAP_ID ION_AUDIO_HEAP_ID
38#endif /*ION_ADSPRPC_HEAP_ID*/
39
40#define RPC_TIMEOUT (5 * HZ)
41#define RPC_HASH_BITS 5
42#define RPC_HASH_SZ (1 << RPC_HASH_BITS)
43#define BALIGN 32
44
/*
 * Acquire/release the calling process's mmap semaphore around user-buffer
 * page walking (find_vma/follow_pfn in buf_get_pages).  Kernel-mode
 * invocations (kernel != 0) pass kernel pointers, so the user mm is not
 * touched and no locking is needed.
 */
#define LOCK_MMAP(kernel)\
	do {\
		if (!kernel)\
			down_read(&current->mm->mmap_sem);\
	} while (0)

#define UNLOCK_MMAP(kernel)\
	do {\
		if (!kernel)\
			up_read(&current->mm->mmap_sem);\
	} while (0)
56
57
58static inline uint32_t buf_page_start(void *buf)
59{
60 uint32_t start = (uint32_t) buf & PAGE_MASK;
61 return start;
62}
63
64static inline uint32_t buf_page_offset(void *buf)
65{
66 uint32_t offset = (uint32_t) buf & (PAGE_SIZE - 1);
67 return offset;
68}
69
70static inline int buf_num_pages(void *buf, int len)
71{
72 uint32_t start = buf_page_start(buf) >> PAGE_SHIFT;
73 uint32_t end = (((uint32_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
74 int nPages = end - start + 1;
75 return nPages;
76}
77
78static inline uint32_t buf_page_size(uint32_t size)
79{
80 uint32_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;
81 return sz > PAGE_SIZE ? sz : PAGE_SIZE;
82}
83
/*
 * Validate a user buffer and record its first physical page run.
 *
 * addr/sz:   user buffer to translate
 * nr_pages:  page count of the buffer (precomputed by the caller)
 * access:    non-zero if the remote side will write the buffer
 * pages:     output array of smq_phy_page entries; only pages[0] is
 *            written here
 * nr_elems:  remaining capacity of the pages array
 *
 * Returns the number of page entries written (1) on success, -1 on any
 * failure.  Caller must hold current->mm->mmap_sem (see LOCK_MMAP).
 */
static inline int buf_get_pages(void *addr, int sz, int nr_pages, int access,
			struct smq_phy_page *pages, int nr_elems)
{
	struct vm_area_struct *vma;
	uint32_t start = buf_page_start(addr);
	uint32_t len = nr_pages << PAGE_SHIFT;
	unsigned long pfn;
	int n = -1, err = 0;

	/* The whole page-aligned span must be a valid user range. */
	VERIFY(err, 0 != access_ok(access ? VERIFY_WRITE : VERIFY_READ,
			(void __user *)start, len));
	if (err)
		goto bail;
	VERIFY(err, 0 != (vma = find_vma(current->mm, start)));
	if (err)
		goto bail;
	/* The buffer must not cross the end of its VMA. */
	VERIFY(err, ((uint32_t)addr + sz) <= vma->vm_end);
	if (err)
		goto bail;
	n = 0;
	/* Resolve the physical frame of the first page; the mapping is
	 * assumed physically contiguous for len bytes from here. */
	VERIFY(err, 0 == follow_pfn(vma, start, &pfn));
	if (err)
		goto bail;
	VERIFY(err, nr_elems > 0);
	if (err)
		goto bail;
	pages->addr = __pfn_to_phys(pfn);
	pages->size = len;
	n++;
 bail:
	return n;
}
116
117#endif /*__KERNEL__*/
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700118
/* Per-invocation bookkeeping: the invoker blocks on 'work' until the DSP
 * response arrives; 'free' is a lockless in-use flag (0 = slot free). */
struct smq_invoke_ctx {
	struct completion work;
	int retval;
	atomic_t free;
};

/* Fixed pool of invoke contexts. */
struct smq_context_list {
	struct smq_invoke_ctx *ls;	/* slot array */
	int size;			/* number of slots in ls */
	int last;			/* rotating hint for the next search */
};

/* Driver-global state; single instance 'gfa' below. */
struct fastrpc_apps {
	smd_channel_t *chan;		/* SMD channel to the aDSP */
	struct smq_context_list clst;
	struct completion work;		/* signalled on SMD_EVENT_OPEN */
	struct ion_client *iclient;
	struct cdev cdev;
	struct class *class;
	struct device *dev;
	dev_t dev_no;
	spinlock_t wrlock;		/* serializes smd_write() */
	spinlock_t hlock;		/* protects htbl */
	struct hlist_head htbl[RPC_HASH_SZ];	/* per-tgid device cache */
};

/* An ION-backed, physically contiguous RPC message buffer. */
struct fastrpc_buf {
	struct ion_handle *handle;
	void *virt;			/* kernel mapping of the buffer */
	ion_phys_addr_t phys;
	int size;
	int used;			/* bytes consumed so far */
};

/* Per-process cached device, hashed by tgid in fastrpc_apps.htbl. */
struct fastrpc_device {
	uint32_t tgid;
	struct hlist_node hn;
	struct fastrpc_buf buf;
};

/* The single global driver instance. */
static struct fastrpc_apps gfa;
160
161static void free_mem(struct fastrpc_buf *buf)
162{
163 struct fastrpc_apps *me = &gfa;
164
165 if (buf->handle) {
166 if (buf->virt) {
167 ion_unmap_kernel(me->iclient, buf->handle);
168 buf->virt = 0;
169 }
170 ion_free(me->iclient, buf->handle);
171 buf->handle = 0;
172 }
173}
174
175static int alloc_mem(struct fastrpc_buf *buf)
176{
177 struct ion_client *clnt = gfa.iclient;
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700178 struct sg_table *sg;
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700179 int err = 0;
180
181 buf->handle = ion_alloc(clnt, buf->size, SZ_4K,
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700182 ION_HEAP(ION_AUDIO_HEAP_ID), 0);
183 VERIFY(err, 0 == IS_ERR_OR_NULL(buf->handle));
184 if (err)
185 goto bail;
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700186 buf->virt = 0;
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700187 VERIFY(err, 0 != (buf->virt = ion_map_kernel(clnt, buf->handle)));
188 if (err)
189 goto bail;
190 VERIFY(err, 0 != (sg = ion_sg_table(clnt, buf->handle)));
191 if (err)
192 goto bail;
193 VERIFY(err, 1 == sg->nents);
194 if (err)
195 goto bail;
196 buf->phys = sg_dma_address(sg->sgl);
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700197 bail:
198 if (err && !IS_ERR_OR_NULL(buf->handle))
199 free_mem(buf);
200 return err;
201}
202
203static int context_list_ctor(struct smq_context_list *me, int size)
204{
205 int err = 0;
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700206 VERIFY(err, 0 != (me->ls = kzalloc(size, GFP_KERNEL)));
207 if (err)
208 goto bail;
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700209 me->size = size / sizeof(*me->ls);
210 me->last = 0;
211 bail:
212 return err;
213}
214
/* Free the invoke-context pool allocated by context_list_ctor(). */
static void context_list_dtor(struct smq_context_list *me)
{
	kfree(me->ls);
	me->ls = 0;
}
220
/*
 * Claim a free invoke context from the pool, initialized for a new
 * invocation, and return it in *po.
 *
 * Slots are claimed locklessly: atomic_cmpxchg(free, 0 -> 1) is the
 * ownership transfer, and the cheap atomic_read beforehand skips
 * obviously-busy slots.  NOTE(review): if every slot is busy this loop
 * busy-spins until one is released; 'last' is a racy search hint and
 * intentionally unsynchronized.
 */
static void context_list_alloc_ctx(struct smq_context_list *me,
					struct smq_invoke_ctx **po)
{
	int i = me->last;
	struct smq_invoke_ctx *ctx;

	for (;;) {
		i = i % me->size;
		ctx = &me->ls[i];
		if (atomic_read(&ctx->free) == 0)
			if (atomic_cmpxchg(&ctx->free, 0, 1) == 0)
				break;
		i++;
	}
	me->last = i;
	/* -1 until the DSP reports a real return value. */
	ctx->retval = -1;
	init_completion(&ctx->work);
	*po = ctx;
}
240
241static void context_free(struct smq_invoke_ctx *me)
242{
243 if (me)
244 atomic_set(&me->free, 0);
245}
246
247static void context_notify_user(struct smq_invoke_ctx *me, int retval)
248{
249 me->retval = retval;
250 complete(&me->work);
251}
252
253static void context_notify_all_users(struct smq_context_list *me)
254{
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700255 int i;
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700256
257 if (!me->ls)
258 return;
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700259 for (i = 0; i < me->size; ++i) {
260 if (atomic_read(&me->ls[i].free) != 0)
261 complete(&me->ls[i].work);
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700262 }
263}
264
/*
 * Build the smq_invoke_buf / smq_phy_page tables describing every in and
 * out buffer of the call, growing the message buffer as needed.
 *
 * kernel: non-zero for in-kernel invocations (user pages are not walked)
 * pra:    caller's argument array
 * ibuf:   the process's cached message buffer (starting point)
 * obuf:   out: the buffer actually used; may be a fresh, larger
 *         allocation, in which case obuf->handle != ibuf->handle and the
 *         caller must free it
 *
 * Whenever the current buffer runs out of room the code allocates a
 * bigger one and jumps back to 'retry' to rebuild the tables from
 * scratch.  Returns 0 on success.
 */
static int get_page_list(uint32_t kernel, uint32_t sc, remote_arg_t *pra,
			struct fastrpc_buf *ibuf, struct fastrpc_buf *obuf)
{
	struct smq_phy_page *pgstart, *pages;
	struct smq_invoke_buf *list;
	int i, rlen, err = 0;
	int inbufs = REMOTE_SCALARS_INBUFS(sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);

	LOCK_MMAP(kernel);
	*obuf = *ibuf;
 retry:
	/* Layout inside obuf: remote args, then invoke-buf list, then the
	 * page array; pgstart[0] describes obuf itself. */
	list = smq_invoke_buf_start((remote_arg_t *)obuf->virt, sc);
	pgstart = smq_phy_page_start(sc, list);
	pages = pgstart + 1;
	rlen = obuf->size - ((uint32_t)pages - (uint32_t)obuf->virt);
	if (rlen < 0) {
		/* Header alone doesn't fit: grow by the shortfall. */
		rlen = ((uint32_t)pages - (uint32_t)obuf->virt) - obuf->size;
		obuf->size += buf_page_size(rlen);
		obuf->handle = 0;
		VERIFY(err, 0 == alloc_mem(obuf));
		if (err)
			goto bail;
		goto retry;
	}
	pgstart->addr = obuf->phys;
	pgstart->size = obuf->size;
	for (i = 0; i < inbufs + outbufs; ++i) {
		void *buf;
		int len, num;

		list[i].num = 0;
		list[i].pgidx = 0;
		len = pra[i].buf.len;
		if (!len)
			continue;
		buf = pra[i].buf.pv;
		num = buf_num_pages(buf, len);
		/* For user calls, try to map the caller's pages in place;
		 * buf_get_pages returns -1 on failure, 0..n entries used. */
		if (!kernel)
			list[i].num = buf_get_pages(buf, len, num,
				i >= inbufs, pages, rlen / sizeof(*pages));
		else
			list[i].num = 0;
		VERIFY(err, list[i].num >= 0);
		if (err)
			goto bail;
		if (list[i].num) {
			/* Buffer mapped in place. */
			list[i].pgidx = pages - pgstart;
			pages = pages + list[i].num;
		} else if (rlen > sizeof(*pages)) {
			/* Reserve one page slot; get_args() fills it when it
			 * relocates this buffer into RPC memory. */
			list[i].pgidx = pages - pgstart;
			pages = pages + 1;
		} else {
			/* Page table full: grow the buffer and start over. */
			if (obuf->handle != ibuf->handle)
				free_mem(obuf);
			obuf->size += buf_page_size(sizeof(*pages));
			obuf->handle = 0;
			VERIFY(err, 0 == alloc_mem(obuf));
			if (err)
				goto bail;
			goto retry;
		}
		rlen = obuf->size - ((uint32_t) pages - (uint32_t) obuf->virt);
	}
	obuf->used = obuf->size - rlen;
 bail:
	if (err && (obuf->handle != ibuf->handle))
		free_mem(obuf);
	UNLOCK_MMAP(kernel);
	return err;
}
336
/*
 * Marshal the call arguments into RPC memory.
 *
 * Buffers that get_page_list() could not map in place (list[i].num == 0)
 * are copied into the message buffer (or into extra ION buffers when the
 * message buffer overflows); in-place buffers are passed through.  Input
 * handles are appended after the buffers, and all memory handed to the
 * DSP is cache-flushed.
 *
 * abufs/nbufs: out: array (and count) of extra ION buffers allocated
 * here; the caller frees them.  They are reported even on error so
 * cleanup is always possible.  Returns 0 on success.
 */
static int get_args(uint32_t kernel, uint32_t sc, remote_arg_t *pra,
			remote_arg_t *rpra, remote_arg_t *upra,
			struct fastrpc_buf *ibuf, struct fastrpc_buf **abufs,
			int *nbufs)
{
	struct smq_invoke_buf *list;
	struct fastrpc_buf *pbuf = ibuf, *obufs = 0;
	struct smq_phy_page *pages;
	void *args;
	int i, rlen, size, used, inh, bufs = 0, err = 0;
	int inbufs = REMOTE_SCALARS_INBUFS(sc);
	int outbufs = REMOTE_SCALARS_OUTBUFS(sc);

	list = smq_invoke_buf_start(rpra, sc);
	pages = smq_phy_page_start(sc, list);
	/* Argument data starts BALIGN-aligned after the header tables. */
	used = ALIGN(pbuf->used, BALIGN);
	args = (void *)((char *)pbuf->virt + used);
	rlen = pbuf->size - used;
	for (i = 0; i < inbufs + outbufs; ++i) {
		int num;

		rpra[i].buf.len = pra[i].buf.len;
		if (!rpra[i].buf.len)
			continue;
		if (list[i].num) {
			/* Mapped in place by get_page_list(): pass through. */
			rpra[i].buf.pv = pra[i].buf.pv;
			continue;
		}
		if (rlen < pra[i].buf.len) {
			/* Current buffer exhausted: allocate an extra ION
			 * buffer sized for this argument. */
			struct fastrpc_buf *b;
			pbuf->used = pbuf->size - rlen;
			VERIFY(err, 0 != (b = krealloc(obufs,
				(bufs + 1) * sizeof(*obufs), GFP_KERNEL)));
			if (err)
				goto bail;
			obufs = b;
			pbuf = obufs + bufs;
			pbuf->size = buf_num_pages(0, pra[i].buf.len) *
								PAGE_SIZE;
			VERIFY(err, 0 == alloc_mem(pbuf));
			if (err)
				goto bail;
			bufs++;
			args = pbuf->virt;
			rlen = pbuf->size;
		}
		num = buf_num_pages(args, pra[i].buf.len);
		if (pbuf == ibuf) {
			/* Data lives inside the message buffer itself, which
			 * pgstart[0] already describes. */
			list[i].num = num;
			list[i].pgidx = 0;
		} else {
			/* Data lives in an extra buffer: fill the page slot
			 * reserved by get_page_list(). */
			list[i].num = 1;
			pages[list[i].pgidx].addr =
				buf_page_start((void *)(pbuf->phys +
						 (pbuf->size - rlen)));
			pages[list[i].pgidx].size =
				buf_page_size(pra[i].buf.len);
		}
		if (i < inbufs) {
			/* Only input buffers carry data toward the DSP. */
			if (!kernel) {
				VERIFY(err, 0 == copy_from_user(args,
						pra[i].buf.pv, pra[i].buf.len));
				if (err)
					goto bail;
			} else {
				memmove(args, pra[i].buf.pv, pra[i].buf.len);
			}
		}
		rpra[i].buf.pv = args;
		args = (void *)((char *)args + ALIGN(pra[i].buf.len, BALIGN));
		rlen -= ALIGN(pra[i].buf.len, BALIGN);
	}
	/* Make the DSP see the marshalled input data. */
	for (i = 0; i < inbufs; ++i) {
		if (rpra[i].buf.len)
			dmac_flush_range(rpra[i].buf.pv,
				(char *)rpra[i].buf.pv + rpra[i].buf.len);
	}
	pbuf->used = pbuf->size - rlen;
	size = sizeof(*rpra) * REMOTE_SCALARS_INHANDLES(sc);
	if (size) {
		/* Input handles sit right after the in/out buffers. */
		inh = inbufs + outbufs;
		if (!kernel) {
			VERIFY(err, 0 == copy_from_user(&rpra[inh], &upra[inh],
							size));
			if (err)
				goto bail;
		} else {
			memmove(&rpra[inh], &upra[inh], size);
		}
	}
	dmac_flush_range(rpra, (char *)rpra + used);
 bail:
	/* Always report extra buffers so the caller can free them. */
	*abufs = obufs;
	*nbufs = bufs;
	return err;
}
433
434static int put_args(uint32_t kernel, uint32_t sc, remote_arg_t *pra,
435 remote_arg_t *rpra, remote_arg_t *upra)
436{
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700437 int i, inbufs, outbufs, outh, size;
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700438 int err = 0;
439
440 inbufs = REMOTE_SCALARS_INBUFS(sc);
441 outbufs = REMOTE_SCALARS_OUTBUFS(sc);
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700442 for (i = inbufs; i < inbufs + outbufs; ++i) {
443 if (rpra[i].buf.pv != pra[i].buf.pv) {
444 VERIFY(err, 0 == copy_to_user(pra[i].buf.pv,
445 rpra[i].buf.pv, rpra[i].buf.len));
446 if (err)
447 goto bail;
448 }
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700449 }
450 size = sizeof(*rpra) * REMOTE_SCALARS_OUTHANDLES(sc);
451 if (size) {
452 outh = inbufs + outbufs + REMOTE_SCALARS_INHANDLES(sc);
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700453 if (!kernel) {
454 VERIFY(err, 0 == copy_to_user(&upra[outh], &rpra[outh],
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700455 size));
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700456 if (err)
457 goto bail;
458 } else {
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700459 memmove(&upra[outh], &rpra[outh], size);
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700460 }
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700461 }
462 bail:
463 return err;
464}
465
/*
 * Invalidate CPU cache lines over data the DSP may have written, before
 * the CPU reads the results.
 *
 * Out buffers sharing a page with the message buffer 'rpra' cannot be
 * invalidated individually; that case (and the presence of out handles,
 * which live inside the message buffer) is handled by invalidating the
 * whole used message region at the end.
 */
static void inv_args(uint32_t sc, remote_arg_t *rpra, int used)
{
	int i, inbufs, outbufs;
	int inv = 0;

	inbufs = REMOTE_SCALARS_INBUFS(sc);
	outbufs = REMOTE_SCALARS_OUTBUFS(sc);
	for (i = inbufs; i < inbufs + outbufs; ++i) {
		if (buf_page_start(rpra) == buf_page_start(rpra[i].buf.pv))
			inv = 1;
		else if (rpra[i].buf.len)
			dmac_inv_range(rpra[i].buf.pv,
				(char *)rpra[i].buf.pv + rpra[i].buf.len);
	}

	if (inv || REMOTE_SCALARS_OUTHANDLES(sc))
		dmac_inv_range(rpra, (char *)rpra + used);
}
484
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700485static int fastrpc_invoke_send(struct fastrpc_apps *me, uint32_t handle,
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700486 uint32_t sc, struct smq_invoke_ctx *ctx,
487 struct fastrpc_buf *buf)
488{
489 struct smq_msg msg;
490 int err = 0, len;
491
492 msg.pid = current->tgid;
493 msg.tid = current->pid;
494 msg.invoke.header.ctx = ctx;
495 msg.invoke.header.handle = handle;
496 msg.invoke.header.sc = sc;
497 msg.invoke.page.addr = buf->phys;
498 msg.invoke.page.size = buf_page_size(buf->used);
499 spin_lock(&me->wrlock);
500 len = smd_write(me->chan, &msg, sizeof(msg));
501 spin_unlock(&me->wrlock);
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700502 VERIFY(err, len == sizeof(msg));
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700503 return err;
504}
505
506static void fastrpc_deinit(void)
507{
508 struct fastrpc_apps *me = &gfa;
509
510 if (me->chan)
511 (void)smd_close(me->chan);
512 context_list_dtor(&me->clst);
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700513 if (me->iclient)
514 ion_client_destroy(me->iclient);
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700515 me->iclient = 0;
516 me->chan = 0;
517}
518
/*
 * Drain all pending responses from the SMD channel (called from the
 * SMD_EVENT_DATA callback) and wake the corresponding invokers.
 *
 * NOTE(review): rsp.ctx is a raw pointer echoed back by the remote
 * processor and is dereferenced without validation — a misbehaving DSP
 * could make this touch arbitrary kernel memory; confirm whether the
 * channel is trusted.
 */
static void fastrpc_read_handler(void)
{
	struct fastrpc_apps *me = &gfa;
	struct smq_invoke_rsp rsp;
	int err = 0;

	do {
		/* A short read means no complete response is left. */
		VERIFY(err, sizeof(rsp) ==
			smd_read_from_cb(me->chan, &rsp, sizeof(rsp)));
		if (err)
			goto bail;
		context_notify_user(rsp.ctx, rsp.retval);
	} while (!err);
 bail:
	return;
}
535
536static void smd_event_handler(void *priv, unsigned event)
537{
538 struct fastrpc_apps *me = (struct fastrpc_apps *)priv;
539
540 switch (event) {
541 case SMD_EVENT_OPEN:
542 complete(&(me->work));
543 break;
544 case SMD_EVENT_CLOSE:
545 context_notify_all_users(&me->clst);
546 break;
547 case SMD_EVENT_DATA:
548 fastrpc_read_handler();
549 break;
550 }
551}
552
/*
 * One-time driver bring-up: locks, context pool, ION client, and the
 * SMD channel to the aDSP.  Waits up to RPC_TIMEOUT for the channel to
 * report SMD_EVENT_OPEN.  A non-NULL me->chan marks init as already
 * done.  On failure, everything acquired is torn down via
 * fastrpc_deinit().  Returns 0 on success.
 */
static int fastrpc_init(void)
{
	int err = 0;
	struct fastrpc_apps *me = &gfa;

	if (me->chan == 0) {
		int i;
		spin_lock_init(&me->hlock);
		spin_lock_init(&me->wrlock);
		init_completion(&me->work);
		for (i = 0; i < RPC_HASH_SZ; ++i)
			INIT_HLIST_HEAD(&me->htbl[i]);
		/* SZ_4K bytes of invoke-context slots. */
		VERIFY(err, 0 == context_list_ctor(&me->clst, SZ_4K));
		if (err)
			goto bail;
		me->iclient = msm_ion_client_create(ION_HEAP_CARVEOUT_MASK,
							DEVICE_NAME);
		VERIFY(err, 0 == IS_ERR_OR_NULL(me->iclient));
		if (err)
			goto bail;
		VERIFY(err, 0 == smd_named_open_on_edge(FASTRPC_SMD_GUID,
						SMD_APPS_QDSP, &me->chan,
						me, smd_event_handler));
		if (err)
			goto bail;
		/* smd_event_handler() completes me->work on EVENT_OPEN. */
		VERIFY(err, 0 != wait_for_completion_timeout(&me->work,
							RPC_TIMEOUT));
		if (err)
			goto bail;
	}
 bail:
	if (err)
		fastrpc_deinit();
	return err;
}
588
589static void free_dev(struct fastrpc_device *dev)
590{
591 if (dev) {
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700592 free_mem(&dev->buf);
593 kfree(dev);
Mitchel Humpherysbffc5792013-02-06 12:03:20 -0800594 module_put(THIS_MODULE);
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700595 }
596}
597
598static int alloc_dev(struct fastrpc_device **dev)
599{
600 int err = 0;
601 struct fastrpc_device *fd = 0;
602
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700603 VERIFY(err, 0 != try_module_get(THIS_MODULE));
604 if (err)
605 goto bail;
606 VERIFY(err, 0 != (fd = kzalloc(sizeof(*fd), GFP_KERNEL)));
607 if (err)
608 goto bail;
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700609 fd->buf.size = PAGE_SIZE;
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700610 VERIFY(err, 0 == alloc_mem(&fd->buf));
611 if (err)
612 goto bail;
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700613 fd->tgid = current->tgid;
614 INIT_HLIST_NODE(&fd->hn);
615 *dev = fd;
616 bail:
617 if (err)
618 free_dev(fd);
619 return err;
620}
621
622static int get_dev(struct fastrpc_apps *me, struct fastrpc_device **rdev)
623{
624 struct hlist_head *head;
625 struct fastrpc_device *dev = 0;
626 struct hlist_node *n;
627 uint32_t h = hash_32(current->tgid, RPC_HASH_BITS);
628 int err = 0;
629
630 spin_lock(&me->hlock);
631 head = &me->htbl[h];
632 hlist_for_each_entry(dev, n, head, hn) {
633 if (dev->tgid == current->tgid) {
634 hlist_del(&dev->hn);
635 break;
636 }
637 }
638 spin_unlock(&me->hlock);
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700639 VERIFY(err, dev != 0);
640 if (err)
641 goto bail;
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700642 *rdev = dev;
643 bail:
644 if (err) {
645 free_dev(dev);
646 err = alloc_dev(rdev);
647 }
648 return err;
649}
650
651static void add_dev(struct fastrpc_apps *me, struct fastrpc_device *dev)
652{
653 struct hlist_head *head;
654 uint32_t h = hash_32(current->tgid, RPC_HASH_BITS);
655
656 spin_lock(&me->hlock);
657 head = &me->htbl[h];
658 hlist_add_head(&dev->hn, head);
659 spin_unlock(&me->hlock);
660 return;
661}
662
663static int fastrpc_release_current_dsp_process(void);
664
/*
 * Core invocation path shared by the ioctl (kernel == 0) and the
 * in-kernel create/release helpers (kernel == 1).
 *
 * Marshals arguments into RPC memory, sends the message over SMD, waits
 * (interruptibly) for the DSP response, then copies results back.  If
 * the wait is interrupted, the DSP-side process is torn down and we wait
 * uninterruptibly for the in-flight response before releasing the
 * context — the DSP will still write to ctx.
 *
 * NOTE(review): when REMOTE_SCALARS_LENGTH(sc) == 0, obuf is left
 * uninitialized apart from obuf.handle, yet obuf.phys/obuf.used are read
 * by fastrpc_invoke_send() and inv_args(); verify no in-tree caller hits
 * that combination with handles present.
 */
static int fastrpc_internal_invoke(struct fastrpc_apps *me, uint32_t kernel,
			struct fastrpc_ioctl_invoke *invoke, remote_arg_t *pra)
{
	remote_arg_t *rpra = 0;
	struct fastrpc_device *dev = 0;
	struct smq_invoke_ctx *ctx = 0;
	struct fastrpc_buf obuf, *abufs = 0, *b;
	int interrupted = 0;
	uint32_t sc;
	int i, nbufs = 0, err = 0;

	sc = invoke->sc;
	obuf.handle = 0;
	if (REMOTE_SCALARS_LENGTH(sc)) {
		/* Grab (or create) this process's message buffer. */
		VERIFY(err, 0 == get_dev(me, &dev));
		if (err)
			goto bail;
		VERIFY(err, 0 == get_page_list(kernel, sc, pra, &dev->buf,
						&obuf));
		if (err)
			goto bail;
		rpra = (remote_arg_t *)obuf.virt;
		VERIFY(err, 0 == get_args(kernel, sc, pra, rpra, invoke->pra,
					&obuf, &abufs, &nbufs));
		if (err)
			goto bail;
	}

	context_list_alloc_ctx(&me->clst, &ctx);
	VERIFY(err, 0 == fastrpc_invoke_send(me, invoke->handle, sc, ctx,
						&obuf));
	if (err)
		goto bail;
	inv_args(sc, rpra, obuf.used);
	/* interrupted != 0 means a signal cut the wait short. */
	VERIFY(err, 0 == (interrupted =
			wait_for_completion_interruptible(&ctx->work)));
	if (err)
		goto bail;
	VERIFY(err, 0 == (err = ctx->retval));
	if (err)
		goto bail;
	VERIFY(err, 0 == put_args(kernel, sc, pra, rpra, invoke->pra));
	if (err)
		goto bail;
 bail:
	if (interrupted) {
		/* Tear down the remote process, then ride out the
		 * in-flight response before freeing ctx. */
		if (!kernel)
			(void)fastrpc_release_current_dsp_process();
		wait_for_completion(&ctx->work);
	}
	context_free(ctx);

	for (i = 0, b = abufs; i < nbufs; ++i, ++b)
		free_mem(b);

	kfree(abufs);
	if (dev) {
		/* Put the cached device back; free obuf only if it was
		 * reallocated away from the device's own buffer. */
		add_dev(me, dev);
		if (obuf.handle != dev->buf.handle)
			free_mem(&obuf);
	}
	return err;
}
728
729static int fastrpc_create_current_dsp_process(void)
730{
731 int err = 0;
732 struct fastrpc_ioctl_invoke ioctl;
733 struct fastrpc_apps *me = &gfa;
734 remote_arg_t ra[1];
735 int tgid = 0;
736
737 tgid = current->tgid;
738 ra[0].buf.pv = &tgid;
739 ra[0].buf.len = sizeof(tgid);
740 ioctl.handle = 1;
741 ioctl.sc = REMOTE_SCALARS_MAKE(0, 1, 0);
742 ioctl.pra = ra;
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700743 VERIFY(err, 0 == fastrpc_internal_invoke(me, 1, &ioctl, ra));
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700744 return err;
745}
746
747static int fastrpc_release_current_dsp_process(void)
748{
749 int err = 0;
750 struct fastrpc_apps *me = &gfa;
751 struct fastrpc_ioctl_invoke ioctl;
752 remote_arg_t ra[1];
753 int tgid = 0;
754
755 tgid = current->tgid;
756 ra[0].buf.pv = &tgid;
757 ra[0].buf.len = sizeof(tgid);
758 ioctl.handle = 1;
759 ioctl.sc = REMOTE_SCALARS_MAKE(1, 1, 0);
760 ioctl.pra = ra;
Mitchel Humpherysb6a022f2012-09-30 22:27:53 -0700761 VERIFY(err, 0 == fastrpc_internal_invoke(me, 1, &ioctl, ra));
Mitchel Humpherys065497f2012-08-29 16:20:15 -0700762 return err;
763}
764
765static void cleanup_current_dev(void)
766{
767 struct fastrpc_apps *me = &gfa;
768 uint32_t h = hash_32(current->tgid, RPC_HASH_BITS);
769 struct hlist_head *head;
770 struct hlist_node *pos;
771 struct fastrpc_device *dev;
772
773 rnext:
774 dev = 0;
775 spin_lock(&me->hlock);
776 head = &me->htbl[h];
777 hlist_for_each_entry(dev, pos, head, hn) {
778 if (dev->tgid == current->tgid) {
779 hlist_del(&dev->hn);
780 break;
781 }
782 }
783 spin_unlock(&me->hlock);
784 if (dev) {
785 free_dev(dev);
786 goto rnext;
787 }
788 return;
789}
790
/*
 * Last close of the device file: best-effort tear-down of the DSP-side
 * process, then free the process's cached device(s).
 */
static int fastrpc_device_release(struct inode *inode, struct file *file)
{
	(void)fastrpc_release_current_dsp_process();
	cleanup_current_dev();
	return 0;
}
797
/*
 * Open of the device file: create the DSP-side counterpart of the
 * calling process.  A temporary module reference is held across the
 * call; the per-process device allocated inside takes its own reference
 * (see alloc_dev/free_dev).
 *
 * NOTE(review): if try_module_get() fails (module unloading), this
 * returns 0 and the open "succeeds" without a DSP process — confirm
 * that is intended.
 */
static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	int err = 0;

	if (0 != try_module_get(THIS_MODULE)) {
		/* This call will cause a dev to be created
		 * which will addref this module
		 */
		VERIFY(err, 0 == fastrpc_create_current_dsp_process());
		if (err)
			cleanup_current_dev();
		module_put(THIS_MODULE);
	}
	return err;
}
813
814
/*
 * ioctl entry point.  FASTRPC_IOCTL_INVOKE copies the invoke descriptor
 * and its remote_arg array in from user space, then runs the invocation
 * in user mode (kernel == 0).
 *
 * NOTE(review): param lacks a __user annotation (sparse would flag it);
 * and when bufs == 0 the copy_from_user(pra == NULL, ..., 0) below is a
 * harmless zero-length copy, but moving it inside "if (bufs)" would be
 * cleaner.
 */
static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
				 unsigned long ioctl_param)
{
	struct fastrpc_apps *me = &gfa;
	struct fastrpc_ioctl_invoke invoke;
	remote_arg_t *pra = 0;
	void *param = (char *)ioctl_param;
	int bufs, err = 0;

	switch (ioctl_num) {
	case FASTRPC_IOCTL_INVOKE:
		VERIFY(err, 0 == copy_from_user(&invoke, param,
						sizeof(invoke)));
		if (err)
			goto bail;
		/* Copy the caller's buffer descriptors (in + out). */
		bufs = REMOTE_SCALARS_INBUFS(invoke.sc) +
			REMOTE_SCALARS_OUTBUFS(invoke.sc);
		if (bufs) {
			bufs = bufs * sizeof(*pra);
			VERIFY(err, 0 != (pra = kmalloc(bufs, GFP_KERNEL)));
			if (err)
				goto bail;
		}
		VERIFY(err, 0 == copy_from_user(pra, invoke.pra, bufs));
		if (err)
			goto bail;
		VERIFY(err, 0 == (err = fastrpc_internal_invoke(me, 0, &invoke,
								pra)));
		if (err)
			goto bail;
		break;
	default:
		err = -ENOTTY;
		break;
	}
 bail:
	kfree(pra);
	return err;
}
854
Mitchel Humpherysbffc5792013-02-06 12:03:20 -0800855#ifdef __KERNEL__
856
/* File operations for the adsprpc char device (ioctl-driven RPC). */
static const struct file_operations fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
};
862
Mitchel Humpherysbffc5792013-02-06 12:03:20 -0800863#endif /*__KERNEL__*/
864
/*
 * Module init: bring up the RPC transport, then register the char
 * device (region, cdev, class, device node).
 *
 * NOTE(review): the error path has two suspect spots to confirm —
 * cdev_del() is called whenever cdev.owner is set, i.e. even when
 * cdev_add() itself failed (cdev was never added); and me->class may
 * hold an ERR_PTR when class_create() failed, yet "if (me->class)"
 * still passes it to class_destroy().
 */
static int __init fastrpc_device_init(void)
{
	struct fastrpc_apps *me = &gfa;
	int err = 0;

	VERIFY(err, 0 == fastrpc_init());
	if (err)
		goto bail;
	VERIFY(err, 0 == alloc_chrdev_region(&me->dev_no, 0, 1, DEVICE_NAME));
	if (err)
		goto bail;
	cdev_init(&me->cdev, &fops);
	me->cdev.owner = THIS_MODULE;
	VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0), 1));
	if (err)
		goto bail;
	me->class = class_create(THIS_MODULE, "chardrv");
	VERIFY(err, !IS_ERR(me->class));
	if (err)
		goto bail;
	me->dev = device_create(me->class, NULL, MKDEV(MAJOR(me->dev_no), 0),
				NULL, DEVICE_NAME);
	VERIFY(err, !IS_ERR(me->dev));
	if (err)
		goto bail;
	pr_info("'created /dev/%s c %d 0'\n", DEVICE_NAME, MAJOR(me->dev_no));
 bail:
	if (err) {
		if (me->dev_no)
			unregister_chrdev_region(me->dev_no, 1);
		if (me->class)
			class_destroy(me->class);
		if (me->cdev.owner)
			cdev_del(&me->cdev);
		fastrpc_deinit();
	}
	return err;
}
903
/*
 * Module exit: shut down the RPC transport first so no new work
 * arrives, then unwind the char-device registration in reverse order
 * of creation.
 */
static void __exit fastrpc_device_exit(void)
{
	struct fastrpc_apps *me = &gfa;

	fastrpc_deinit();
	device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
	class_destroy(me->class);
	cdev_del(&me->cdev);
	unregister_chrdev_region(me->dev_no, 1);
}
914
915module_init(fastrpc_device_init);
916module_exit(fastrpc_device_exit);
917
918MODULE_LICENSE("GPL v2");