/* Qualcomm TrustZone communicator driver
2 *
3 * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 and
7 * only version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#define KMSG_COMPONENT "TZCOM"
16#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/android_pmem.h>
30#include <linux/io.h>
31#include <mach/scm.h>
32#include <mach/peripheral-loader.h>
33#include <linux/tzcom.h>
34#include "tzcomi.h"
35
#define TZCOM_DEV "tzcom"	/* character device and class name */

#define TZSCHEDULER_CMD_ID 1 /* CMD id of the trustzone scheduler */

#undef PDEBUG
/* Debug logging: prefixed with function name and calling task (pid, comm). */
#define PDEBUG(fmt, args...) pr_debug("%s(%i, %s): " fmt "\n", \
		__func__, current->pid, current->comm, ## args)

#undef PERR
/* Error logging: same prefix as PDEBUG, emitted at pr_err level. */
#define PERR(fmt, args...) pr_err("%s(%i, %s): " fmt "\n", \
		__func__, current->pid, current->comm, ## args)
48
/* Character-device bookkeeping, populated in tzcom_init(). */
static struct class *driver_class;
static dev_t tzcom_device_no;
static struct cdev tzcom_cdev;

/* Shared buffer IN: commands to and responses from TrustZone. */
static u8 *sb_in_virt;
static s32 sb_in_phys;		/* physical address from pmem_kalloc() */
static size_t sb_in_length = 20 * SZ_1K;
/* Shared buffer OUT: callback commands from TrustZone to services. */
static u8 *sb_out_virt;
static s32 sb_out_phys;		/* physical address from pmem_kalloc() */
static size_t sb_out_length = 20 * SZ_1K;

/* Handle for the "playrdy" peripheral image; loaded on first open(). */
static void *pil;

/* Monotonic generator for registered-service instance ids. */
static atomic_t svc_instance_ctr = ATOMIC_INIT(0);
static DEFINE_MUTEX(sb_in_lock);	/* guards shared buffer IN */
static DEFINE_MUTEX(sb_out_lock);	/* guards shared buffer OUT */
static DEFINE_MUTEX(send_cmd_lock);	/* serializes tzcom_send_cmd() */
66
/*
 * One callback command received from TZ.  Queued per-fd until a
 * registered service consumes it via TZCOM_IOCTL_READ_NEXT_CMD_REQ.
 * The callback payload is kmalloc'ed immediately after this struct.
 */
struct tzcom_callback_list {
	struct list_head list;
	struct tzcom_callback callback;
};

/* One service registered by userspace on a given fd. */
struct tzcom_registered_svc_list {
	struct list_head list;
	struct tzcom_register_svc_op_req svc;
	wait_queue_head_t next_cmd_wq;	/* service sleeps here for work */
	int next_cmd_flag;		/* wake condition for next_cmd_wq */
};

/* Per-open-file driver state (file->private_data). */
struct tzcom_data_t {
	struct list_head callback_list_head;
	struct mutex callback_list_lock;
	struct list_head registered_svc_list_head;
	spinlock_t registered_svc_list_lock;
	wait_queue_head_t cont_cmd_wq;	/* send_cmd sleeps here for continue */
	int cont_cmd_flag;		/* wake condition for cont_cmd_wq */
	/* instance id of the service that handled the last callback */
	u32 handled_cmd_svc_instance_id;
};
88
89static int tzcom_scm_call(const void *cmd_buf, size_t cmd_len,
90 void *resp_buf, size_t resp_len)
91{
92 return scm_call(SCM_SVC_TZSCHEDULER, TZSCHEDULER_CMD_ID,
93 cmd_buf, cmd_len, resp_buf, resp_len);
94}
95
96static s32 tzcom_virt_to_phys(u8 *virt)
97{
98 if (virt >= sb_in_virt &&
99 virt < (sb_in_virt + sb_in_length)) {
100 return sb_in_phys + (virt - sb_in_virt);
101 } else if (virt >= sb_out_virt &&
102 virt < (sb_out_virt + sb_out_length)) {
103 return sb_out_phys + (virt - sb_out_virt);
104 } else {
105 return virt_to_phys(virt);
106 }
107}
108
109static u8 *tzcom_phys_to_virt(s32 phys)
110{
111 if (phys >= sb_in_phys &&
112 phys < (sb_in_phys + sb_in_length)) {
113 return sb_in_virt + (phys - sb_in_phys);
114 } else if (phys >= sb_out_phys &&
115 phys < (sb_out_phys + sb_out_length)) {
116 return sb_out_virt + (phys - sb_out_phys);
117 } else {
118 return phys_to_virt(phys);
119 }
120}
121
122static int __tzcom_is_svc_unique(struct tzcom_data_t *data,
123 struct tzcom_register_svc_op_req svc)
124{
125 struct tzcom_registered_svc_list *ptr;
126 int unique = 1;
127 unsigned long flags;
128
129 spin_lock_irqsave(&data->registered_svc_list_lock, flags);
130 list_for_each_entry(ptr, &data->registered_svc_list_head, list) {
131 if (ptr->svc.svc_id == svc.svc_id) {
132 PERR("Service id: %u is already registered",
133 ptr->svc.svc_id);
134 unique = 0;
135 break;
136 } else if (svc.cmd_id_low >= ptr->svc.cmd_id_low &&
137 svc.cmd_id_low <= ptr->svc.cmd_id_high) {
138 PERR("Cmd id low falls in the range of another"
139 "registered service");
140 unique = 0;
141 break;
142 } else if (svc.cmd_id_high >= ptr->svc.cmd_id_low &&
143 svc.cmd_id_high <= ptr->svc.cmd_id_high) {
144 PERR("Cmd id high falls in the range of another"
145 "registered service");
146 unique = 0;
147 break;
148 }
149 }
150 spin_unlock_irqrestore(&data->registered_svc_list_lock, flags);
151 return unique;
152}
153
154static int tzcom_register_service(struct tzcom_data_t *data, void __user *argp)
155{
156 int ret;
157 unsigned long flags;
158 struct tzcom_register_svc_op_req rcvd_svc;
159 struct tzcom_registered_svc_list *new_entry;
160
161 ret = copy_from_user(&rcvd_svc, argp, sizeof(rcvd_svc));
162
163 if (ret) {
164 PDEBUG("copy_from_user failed");
165 return ret;
166 }
167
168 PDEBUG("svc_id: %u, cmd_id_low: %u, cmd_id_high: %u",
169 rcvd_svc.svc_id, rcvd_svc.cmd_id_low,
170 rcvd_svc.cmd_id_high);
171 if (!__tzcom_is_svc_unique(data, rcvd_svc)) {
172 PDEBUG("Provided service is not unique");
173 return -EINVAL;
174 }
175
176 rcvd_svc.instance_id = atomic_inc_return(&svc_instance_ctr);
177
178 ret = copy_to_user(argp, &rcvd_svc, sizeof(rcvd_svc));
179 if (ret) {
180 PDEBUG("copy_to_user failed");
181 return ret;
182 }
183
184 new_entry = kmalloc(sizeof(*new_entry), GFP_KERNEL);
185 if (!new_entry) {
186 pr_err("%s: kmalloc failed\n", __func__);
187 return -ENOMEM;
188 }
189 memcpy(&new_entry->svc, &rcvd_svc, sizeof(rcvd_svc));
190 new_entry->next_cmd_flag = 0;
191 init_waitqueue_head(&new_entry->next_cmd_wq);
192
193 spin_lock_irqsave(&data->registered_svc_list_lock, flags);
194 list_add_tail(&new_entry->list, &data->registered_svc_list_head);
195 spin_unlock_irqrestore(&data->registered_svc_list_lock, flags);
196
197
198 return ret;
199}
200
201static int tzcom_unregister_service(struct tzcom_data_t *data,
202 void __user *argp)
203{
204 int ret = 0;
205 unsigned long flags;
206 struct tzcom_unregister_svc_op_req req;
207 struct tzcom_registered_svc_list *ptr;
208 ret = copy_from_user(&req, argp, sizeof(req));
209 if (ret) {
210 PDEBUG("copy_from_user failed");
211 return ret;
212 }
213
214 spin_lock_irqsave(&data->registered_svc_list_lock, flags);
215 list_for_each_entry(ptr, &data->registered_svc_list_head, list) {
216 if (req.svc_id == ptr->svc.svc_id &&
217 req.instance_id == ptr->svc.instance_id) {
218 wake_up_all(&ptr->next_cmd_wq);
219 list_del(&ptr->list);
220 kfree(ptr);
221 spin_unlock_irqrestore(&data->registered_svc_list_lock,
222 flags);
223 return 0;
224 }
225 }
226 spin_unlock_irqrestore(&data->registered_svc_list_lock, flags);
227
228 return -EINVAL;
229}
230
231/**
232 * +---------+ +-----+ +-----------------+
233 * | TZCOM | | SCM | | TZCOM_SCHEDULER |
234 * +----+----+ +--+--+ +--------+--------+
235 * | | |
236 * | scm_call | |
237 * |------------------------------------->| |
238 * | cmd_buf = struct tzcom_command { | |
239 * | cmd_type, |------------------>|
240 * +------+------------- sb_in_cmd_addr, | |
241 * | | sb_in_cmd_len | |
242 * | | } | |
243 * | | resp_buf = struct tzcom_response { | |
244 * | cmd_status, | |
245 * | +---------- sb_in_rsp_addr, | |
246 * | | sb_in_rsp_len |<------------------|
247 * | | }
248 * | | struct tzcom_callback {---------+
249 * | | uint32_t cmd_id; |
250 * | | uint32_t sb_out_cb_data_len;|
251 * | +---------------+ uint32_t sb_out_cb_data_off;|
252 * | | } |
253 * | _________________________|_______________________________ |
254 * | +-----------------------+| +----------------------+ |
255 * +--->+ copy from req.cmd_buf |+>| copy to req.resp_buf | |
256 * +-----------------------+ +----------------------+ |
257 * _________________________________________________________ |
258 * INPUT SHARED BUFFER |
259 * +------------------------------------------------------------------------+
260 * | _________________________________________________________
261 * | +---------------------------------------------+
262 * +->| cmd_id | data_len | data_off | data... |
263 * +---------------------------------------------+
264 * |<------------>|copy to next_cmd.req_buf
265 * _________________________________________________________
266 * OUTPUT SHARED BUFFER
267 */
/*
 * TZCOM_IOCTL_SEND_CMD_REQ handler (serialized by send_cmd_lock).
 *
 * Stages the caller's cmd_buf into shared buffer IN and hands it to the
 * TZ scheduler via SCM.  While TZ reports INCOMPLETE, each callback
 * command TZ placed in shared buffer OUT is copied onto this fd's
 * callback list, all registered services are woken to claim it, and
 * this thread sleeps on cont_cmd_wq until tzcom_cont_cmd() resumes the
 * command.  On completion the response is copied back to resp_buf.
 *
 * Returns 0 on success, -EINVAL on bad arguments, -ENOMEM when a buffer
 * is too small, -ERESTARTSYS when interrupted while waiting, or the
 * positive leftover count from a failed user copy.
 */
static int tzcom_send_cmd(struct tzcom_data_t *data, void __user *argp)
{
	int ret = 0;
	unsigned long flags;
	u32 reqd_len_sb_in = 0;
	u32 reqd_len_sb_out = 0;
	struct tzcom_send_cmd_op_req req;
	struct tzcom_command cmd;
	struct tzcom_response resp;
	struct tzcom_callback *next_callback;
	void *cb_data = NULL;
	struct tzcom_callback_list *new_entry;
	struct tzcom_callback *cb;
	size_t new_entry_len = 0;
	struct tzcom_registered_svc_list *ptr_svc;

	ret = copy_from_user(&req, argp, sizeof(req));
	if (ret) {
		PDEBUG("copy_from_user failed");
		/* NOTE(review): positive leftover count returned as "error" */
		return ret;
	}

	if (req.cmd_buf == NULL || req.resp_buf == NULL) {
		PDEBUG("cmd buffer or response buffer is null");
		return -EINVAL;
	}

	if (req.cmd_len <= 0 || req.resp_len <= 0) {
		PDEBUG("cmd buffer length or "
				"response buffer length not valid");
		return -EINVAL;
	}
	PDEBUG("received cmd_req.req: 0x%p",
				req.cmd_buf);
	PDEBUG("received cmd_req.rsp size: %u, ptr: 0x%p",
			req.resp_len,
			req.resp_buf);

	/* Both the command and its response must fit in SB in together. */
	reqd_len_sb_in = req.cmd_len + req.resp_len;
	if (reqd_len_sb_in > sb_in_length) {
		PDEBUG("Not enough memory to fit cmd_buf and "
				"resp_buf. Required: %u, Available: %u",
				reqd_len_sb_in, sb_in_length);
		return -ENOMEM;
	}

	/* Copy req.cmd_buf to SB in and set req.resp_buf to SB in + cmd_len */
	mutex_lock(&sb_in_lock);
	PDEBUG("Before memcpy on sb_in");
	/* NOTE(review): req.cmd_buf is a userspace pointer — this should
	 * be copy_from_user(), not memcpy(); confirm and fix. */
	memcpy(sb_in_virt, req.cmd_buf, req.cmd_len);
	PDEBUG("After memcpy on sb_in");

	/* cmd_type will always be a new here */
	cmd.cmd_type = TZ_SCHED_CMD_NEW;
	cmd.sb_in_cmd_addr = (u8 *) tzcom_virt_to_phys(sb_in_virt);
	cmd.sb_in_cmd_len = req.cmd_len;

	/* TZ writes its response just past the command inside SB in. */
	resp.cmd_status = TZ_SCHED_STATUS_INCOMPLETE;
	resp.sb_in_rsp_addr = (u8 *) tzcom_virt_to_phys(sb_in_virt +
			req.cmd_len);
	resp.sb_in_rsp_len = req.resp_len;

	PDEBUG("before call tzcom_scm_call, cmd_id = : %u", req.cmd_id);
	PDEBUG("before call tzcom_scm_call, sizeof(cmd) = : %u", sizeof(cmd));

	/* NOTE(review): tzcom_scm_call() return value is ignored here. */
	tzcom_scm_call((const void *) &cmd, sizeof(cmd), &resp, sizeof(resp));
	mutex_unlock(&sb_in_lock);

	while (resp.cmd_status != TZ_SCHED_STATUS_COMPLETE) {
		/*
		 * If cmd is incomplete, get the callback cmd out from SB out
		 * and put it on the list
		 */
		PDEBUG("cmd_status is incomplete.");
		next_callback = (struct tzcom_callback *)sb_out_virt;

		mutex_lock(&sb_out_lock);
		/* Header plus the trailing callback payload must fit. */
		reqd_len_sb_out = sizeof(*next_callback)
				+ next_callback->sb_out_cb_data_len;
		if (reqd_len_sb_out > sb_out_length) {
			PDEBUG("Not enough memory to"
					" fit tzcom_callback buffer."
					" Required: %u, Available: %u",
					reqd_len_sb_out, sb_out_length);
			mutex_unlock(&sb_out_lock);
			return -ENOMEM;
		}

		/* Assumption is cb_data_off is sizeof(tzcom_callback) */
		new_entry_len = sizeof(*new_entry)
				+ next_callback->sb_out_cb_data_len;
		new_entry = kmalloc(new_entry_len, GFP_KERNEL);
		if (!new_entry) {
			PERR("kmalloc failed");
			mutex_unlock(&sb_out_lock);
			return -ENOMEM;
		}

		/* Snapshot the callback header and payload out of SB out so
		 * TZ may reuse the buffer for the next callback. */
		cb = &new_entry->callback;
		cb->cmd_id = next_callback->cmd_id;
		cb->sb_out_cb_data_len = next_callback->sb_out_cb_data_len;
		cb->sb_out_cb_data_off = next_callback->sb_out_cb_data_off;

		cb_data = (u8 *)next_callback
				+ next_callback->sb_out_cb_data_off;
		memcpy((u8 *)cb + cb->sb_out_cb_data_off, cb_data,
				next_callback->sb_out_cb_data_len);
		mutex_unlock(&sb_out_lock);

		mutex_lock(&data->callback_list_lock);
		list_add_tail(&new_entry->list, &data->callback_list_head);
		mutex_unlock(&data->callback_list_lock);

		/*
		 * We don't know which service can handle the command. so we
		 * wake up all blocking services and let them figure out if
		 * they can handle the given command.
		 */
		spin_lock_irqsave(&data->registered_svc_list_lock, flags);
		list_for_each_entry(ptr_svc,
				&data->registered_svc_list_head, list) {
			ptr_svc->next_cmd_flag = 1;
			wake_up_interruptible(&ptr_svc->next_cmd_wq);
		}
		spin_unlock_irqrestore(&data->registered_svc_list_lock,
				flags);

		PDEBUG("waking up next_cmd_wq and "
				"waiting for cont_cmd_wq");
		/* Sleep until tzcom_cont_cmd() resumes this command. */
		if (wait_event_interruptible(data->cont_cmd_wq,
				data->cont_cmd_flag != 0)) {
			PDEBUG("Interrupted: exiting send_cmd loop");
			return -ERESTARTSYS;
		}
		data->cont_cmd_flag = 0;
		/* Resubmit as PENDING so TZ resumes, not restarts, the cmd. */
		cmd.cmd_type = TZ_SCHED_CMD_PENDING;
		mutex_lock(&sb_in_lock);
		tzcom_scm_call((const void *) &cmd, sizeof(cmd), &resp,
				sizeof(resp));
		mutex_unlock(&sb_in_lock);
	}

	/* Recompute the response location as a kernel virtual address
	 * (TZ reported it as a physical address). */
	mutex_lock(&sb_in_lock);
	resp.sb_in_rsp_addr = sb_in_virt + cmd.sb_in_cmd_len;
	resp.sb_in_rsp_len = req.resp_len;
	mutex_unlock(&sb_in_lock);

	/* Cmd is done now. Copy the response from SB in to user */
	if (req.resp_len >= resp.sb_in_rsp_len) {
		PDEBUG("Before memcpy resp_buf");
		mutex_lock(&sb_in_lock);
		/* NOTE(review): req.resp_buf is a userspace pointer — this
		 * should be copy_to_user(), not memcpy(); confirm and fix. */
		memcpy(req.resp_buf, resp.sb_in_rsp_addr, resp.sb_in_rsp_len);
		mutex_unlock(&sb_in_lock);
	} else {
		PDEBUG("Provided response buffer is smaller"
				" than required. Required: %u,"
				" Provided: %u",
				resp.sb_in_rsp_len, req.resp_len);
		ret = -ENOMEM;
	}

	PDEBUG("sending cmd_req.rsp "
			"size: %u, ptr: 0x%p", req.resp_len,
			req.resp_buf);
	ret = copy_to_user(argp, &req, sizeof(req));
	if (ret) {
		PDEBUG("copy_to_user failed");
		return ret;
	}

	return ret;
}
440
441static struct tzcom_registered_svc_list *__tzcom_find_svc(
442 struct tzcom_data_t *data,
443 uint32_t instance_id)
444{
445 struct tzcom_registered_svc_list *entry;
446 unsigned long flags;
447
448 spin_lock_irqsave(&data->registered_svc_list_lock, flags);
449 list_for_each_entry(entry,
450 &data->registered_svc_list_head, list) {
451 if (entry->svc.instance_id == instance_id)
452 break;
453 }
454 spin_unlock_irqrestore(&data->registered_svc_list_lock, flags);
455
456 return entry;
457}
458
459static int __tzcom_copy_cmd(struct tzcom_data_t *data,
460 struct tzcom_next_cmd_op_req *req,
461 struct tzcom_registered_svc_list *ptr_svc)
462{
463 int found = 0;
464 int ret = -EAGAIN;
465 struct tzcom_callback_list *entry;
466 struct tzcom_callback *cb;
467
468 PDEBUG("In here");
469 mutex_lock(&data->callback_list_lock);
470 PDEBUG("Before looping through cmd and svc lists.");
471 list_for_each_entry(entry, &data->callback_list_head, list) {
472 cb = &entry->callback;
473 if (req->svc_id == ptr_svc->svc.svc_id &&
474 req->instance_id == ptr_svc->svc.instance_id &&
475 cb->cmd_id >= ptr_svc->svc.cmd_id_low &&
476 cb->cmd_id <= ptr_svc->svc.cmd_id_high) {
477 PDEBUG("Found matching entry");
478 found = 1;
479 if (cb->sb_out_cb_data_len <= req->req_len) {
480 PDEBUG("copying cmd buffer %p to req "
481 "buffer %p, length: %u",
482 (u8 *)cb + cb->sb_out_cb_data_off,
483 req->req_buf, cb->sb_out_cb_data_len);
484 req->cmd_id = cb->cmd_id;
485 ret = copy_to_user(req->req_buf,
486 (u8 *)cb + cb->sb_out_cb_data_off,
487 cb->sb_out_cb_data_len);
488 if (ret) {
489 PDEBUG("copy_to_user failed");
490 break;
491 }
492 list_del(&entry->list);
493 kfree(entry);
494 ret = 0;
495 } else {
496 PDEBUG("callback data buffer is "
497 "larger than provided buffer."
498 "Required: %u, Provided: %u",
499 cb->sb_out_cb_data_len,
500 req->req_len);
501 ret = -ENOMEM;
502 }
503 break;
504 }
505 }
506 PDEBUG("After looping through cmd and svc lists.");
507 mutex_unlock(&data->callback_list_lock);
508 return ret;
509}
510
511static int tzcom_read_next_cmd(struct tzcom_data_t *data, void __user *argp)
512{
513 int ret = 0;
514 struct tzcom_next_cmd_op_req req;
515 struct tzcom_registered_svc_list *this_svc;
516
517 ret = copy_from_user(&req, argp, sizeof(req));
518 if (ret) {
519 PDEBUG("copy_from_user failed");
520 return ret;
521 }
522
523 if (req.instance_id > atomic_read(&svc_instance_ctr)) {
524 PDEBUG("Invalid instance_id for the request");
525 return -EINVAL;
526 }
527
528 if (!req.req_buf || req.req_len == 0) {
529 PDEBUG("Invalid request buffer or buffer length");
530 return -EINVAL;
531 }
532
533 PDEBUG("Before next_cmd loop");
534 this_svc = __tzcom_find_svc(data, req.instance_id);
535
536 while (1) {
537 PDEBUG("Before wait_event next_cmd.");
538 if (wait_event_interruptible(this_svc->next_cmd_wq,
539 this_svc->next_cmd_flag != 0)) {
540 PDEBUG("Interrupted: exiting wait_next_cmd loop");
541 /* woken up for different reason */
542 return -ERESTARTSYS;
543 }
544 PDEBUG("After wait_event next_cmd.");
545 this_svc->next_cmd_flag = 0;
546
547 ret = __tzcom_copy_cmd(data, &req, this_svc);
548 if (ret == 0) {
549 PDEBUG("Successfully found svc for cmd");
550 data->handled_cmd_svc_instance_id = req.instance_id;
551 break;
552 } else if (ret == -ENOMEM) {
553 PDEBUG("Not enough memory");
554 return ret;
555 }
556 }
557 ret = copy_to_user(argp, &req, sizeof(req));
558 if (ret) {
559 PDEBUG("copy_to_user failed");
560 return ret;
561 }
562 PDEBUG("copy_to_user is done.");
563 return ret;
564}
565
566static int tzcom_cont_cmd(struct tzcom_data_t *data, void __user *argp)
567{
568 int ret = 0;
569 struct tzcom_cont_cmd_op_req req;
570 ret = copy_from_user(&req, argp, sizeof(req));
571 if (ret) {
572 PDEBUG("copy_from_user failed");
573 return ret;
574 }
575
576 /*
577 * Only the svc instance that handled the cmd (in read_next_cmd method)
578 * can call continue cmd
579 */
580 if (data->handled_cmd_svc_instance_id != req.instance_id) {
581 PDEBUG("Only the service instance that handled the last "
582 "callback can continue cmd. "
583 "Expected: %u, Received: %u",
584 data->handled_cmd_svc_instance_id,
585 req.instance_id);
586 return -EINVAL;
587 }
588
589 if (req.resp_buf) {
590 mutex_lock(&sb_out_lock);
591 memcpy(sb_out_virt, req.resp_buf, req.resp_len);
592 mutex_unlock(&sb_out_lock);
593 }
594
595 data->cont_cmd_flag = 1;
596 wake_up_interruptible(&data->cont_cmd_wq);
597 return ret;
598}
599
600static long tzcom_ioctl(struct file *file, unsigned cmd,
601 unsigned long arg)
602{
603 int ret = 0;
604 struct tzcom_data_t *tzcom_data = file->private_data;
605 void __user *argp = (void __user *) arg;
606 PDEBUG("enter tzcom_ioctl()");
607 switch (cmd) {
608 case TZCOM_IOCTL_REGISTER_SERVICE_REQ: {
609 PDEBUG("ioctl register_service_req()");
610 ret = tzcom_register_service(tzcom_data, argp);
611 if (ret)
612 PDEBUG("failed tzcom_register_service: %d", ret);
613 break;
614 }
615 case TZCOM_IOCTL_UNREGISTER_SERVICE_REQ: {
616 PDEBUG("ioctl unregister_service_req()");
617 ret = tzcom_unregister_service(tzcom_data, argp);
618 if (ret)
619 PDEBUG("failed tzcom_unregister_service: %d", ret);
620 break;
621 }
622 case TZCOM_IOCTL_SEND_CMD_REQ: {
623 PDEBUG("ioctl send_cmd_req()");
624 /* Only one client allowed here at a time */
625 mutex_lock(&send_cmd_lock);
626 ret = tzcom_send_cmd(tzcom_data, argp);
627 mutex_unlock(&send_cmd_lock);
628 if (ret)
629 PDEBUG("failed tzcom_send_cmd: %d", ret);
630 break;
631 }
632 case TZCOM_IOCTL_READ_NEXT_CMD_REQ: {
633 PDEBUG("ioctl read_next_cmd_req()");
634 ret = tzcom_read_next_cmd(tzcom_data, argp);
635 if (ret)
636 PDEBUG("failed tzcom_read_next: %d", ret);
637 break;
638 }
639 case TZCOM_IOCTL_CONTINUE_CMD_REQ: {
640 PDEBUG("ioctl continue_cmd_req()");
641 ret = tzcom_cont_cmd(tzcom_data, argp);
642 if (ret)
643 PDEBUG("failed tzcom_cont_cmd: %d", ret);
644 break;
645 }
646 default:
647 return -EINVAL;
648 }
649 return ret;
650}
651
/*
 * open() handler.  On the first open this loads the "playrdy"
 * peripheral image, then (on every open) registers the shared OUT
 * buffer with the TZ scheduler via an SB-init command marshalled
 * through the shared IN buffer, and finally allocates the per-fd
 * tzcom_data_t state.
 *
 * Returns 0 on success, a PIL error code, -EPERM when SB init fails,
 * or -ENOMEM when per-fd allocation fails.
 */
static int tzcom_open(struct inode *inode, struct file *file)
{
	long pil_error;
	struct tz_pr_init_sb_req_s sb_out_init_req;
	struct tz_pr_init_sb_rsp_s sb_out_init_rsp;
	void *rsp_addr_virt;
	struct tzcom_command cmd;
	struct tzcom_response resp;
	struct tzcom_data_t *tzcom_data;

	PDEBUG("In here");
	/* Lazily load the TZ peripheral image once for the whole driver. */
	if (pil == NULL) {
		pil = pil_get("playrdy");
		if (IS_ERR(pil)) {
			PERR("Playready PIL image load failed");
			pil_error = PTR_ERR(pil);
			pil = NULL;
			return pil_error;
		}
		PDEBUG("playrdy image loaded successfully");
	}

	/* Tell TZ where the shared OUT buffer lives (physical address). */
	sb_out_init_req.pr_cmd = TZ_SCHED_CMD_ID_INIT_SB_OUT;
	sb_out_init_req.sb_len = sb_out_length;
	sb_out_init_req.sb_ptr = tzcom_virt_to_phys(sb_out_virt);
	PDEBUG("sb_out_init_req { pr_cmd: %d, sb_len: %u, "
			"sb_ptr (phys): 0x%x }",
			sb_out_init_req.pr_cmd,
			sb_out_init_req.sb_len,
			sb_out_init_req.sb_ptr);

	/* Marshal the init request through the shared IN buffer. */
	mutex_lock(&sb_in_lock);
	PDEBUG("Before memcpy on sb_in");
	memcpy(sb_in_virt, &sb_out_init_req, sizeof(sb_out_init_req));
	PDEBUG("After memcpy on sb_in");

	/* It will always be a new cmd from this method */
	cmd.cmd_type = TZ_SCHED_CMD_NEW;
	cmd.sb_in_cmd_addr = (u8 *) tzcom_virt_to_phys(sb_in_virt);
	cmd.sb_in_cmd_len = sizeof(sb_out_init_req);
	PDEBUG("tzcom_command { cmd_type: %u, sb_in_cmd_addr: %p, "
			"sb_in_cmd_len: %u }",
			cmd.cmd_type, cmd.sb_in_cmd_addr, cmd.sb_in_cmd_len);

	/* The response is expected right after the request inside SB in;
	 * note sb_in_rsp_addr here is a physical address. */
	resp.cmd_status = 0;
	resp.sb_in_rsp_addr = (u8 *)cmd.sb_in_cmd_addr + cmd.sb_in_cmd_len;
	resp.sb_in_rsp_len = sizeof(sb_out_init_rsp);
	PDEBUG("tzcom_response before scm { cmd_status: %u, "
			"sb_in_rsp_addr: %p, sb_in_rsp_len: %u }",
			resp.cmd_status, resp.sb_in_rsp_addr,
			resp.sb_in_rsp_len);

	PDEBUG("Before scm_call for sb_init");
	/* NOTE(review): tzcom_scm_call() return value is ignored here. */
	tzcom_scm_call(&cmd, sizeof(cmd), &resp, sizeof(resp));
	PDEBUG("After scm_call for sb_init");
	PDEBUG("tzcom_response after scm { cmd_status: %u, "
			"sb_in_rsp_addr: %p, sb_in_rsp_len: %u }",
			resp.cmd_status, resp.sb_in_rsp_addr,
			resp.sb_in_rsp_len);

	if (resp.sb_in_rsp_addr) {
		/* Translate the physical response address TZ reported back
		 * into a kernel virtual address before reading it. */
		rsp_addr_virt = tzcom_phys_to_virt((unsigned long)
				resp.sb_in_rsp_addr);
		PDEBUG("Received response phys: %p, virt: %p",
				resp.sb_in_rsp_addr,
				rsp_addr_virt);
		memcpy(&sb_out_init_rsp, rsp_addr_virt, resp.sb_in_rsp_len);
	} else {
		PERR("Error with SB initialization");
		mutex_unlock(&sb_in_lock);
		return -EPERM;
	}
	mutex_unlock(&sb_in_lock);

	PDEBUG("sb_out_init_rsp { pr_cmd: %d, ret: %d }",
			sb_out_init_rsp.pr_cmd, sb_out_init_rsp.ret);

	/* Non-zero ret from TZ means the OUT buffer was not accepted. */
	if (sb_out_init_rsp.ret) {
		PERR("sb_out_init_req failed: %d", sb_out_init_rsp.ret);
		return -EPERM;
	}

	/* Per-fd state: callback queue, registered services, wait queues. */
	tzcom_data = kmalloc(sizeof(*tzcom_data), GFP_KERNEL);
	if (!tzcom_data) {
		PERR("kmalloc failed");
		return -ENOMEM;
	}
	file->private_data = tzcom_data;

	INIT_LIST_HEAD(&tzcom_data->callback_list_head);
	mutex_init(&tzcom_data->callback_list_lock);

	INIT_LIST_HEAD(&tzcom_data->registered_svc_list_head);
	spin_lock_init(&tzcom_data->registered_svc_list_lock);

	init_waitqueue_head(&tzcom_data->cont_cmd_wq);
	tzcom_data->cont_cmd_flag = 0;
	tzcom_data->handled_cmd_svc_instance_id = 0;
	return 0;
}
752
753static int tzcom_release(struct inode *inode, struct file *file)
754{
755 struct tzcom_data_t *tzcom_data = file->private_data;
756 struct tzcom_callback_list *lcb, *ncb;
757 struct tzcom_registered_svc_list *lsvc, *nsvc;
758 PDEBUG("In here");
759
760 wake_up_all(&tzcom_data->cont_cmd_wq);
761
762 list_for_each_entry_safe(lcb, ncb,
763 &tzcom_data->callback_list_head, list) {
764 list_del(&lcb->list);
765 kfree(lcb);
766 }
767
768 list_for_each_entry_safe(lsvc, nsvc,
769 &tzcom_data->registered_svc_list_head, list) {
770 wake_up_all(&lsvc->next_cmd_wq);
771 list_del(&lsvc->list);
772 kfree(lsvc);
773 }
774
775 kfree(tzcom_data);
776 return 0;
777}
778
/* File operations for the /dev/tzcom character device. */
static const struct file_operations tzcom_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = tzcom_ioctl,
	.open = tzcom_open,
	.release = tzcom_release
};
785
786static int __init tzcom_init(void)
787{
788 int rc;
789 struct device *class_dev;
790
791 PDEBUG("Hello tzcom");
792
793 rc = alloc_chrdev_region(&tzcom_device_no, 0, 1, TZCOM_DEV);
794 if (rc < 0) {
795 PERR("alloc_chrdev_region failed %d", rc);
796 return rc;
797 }
798
799 driver_class = class_create(THIS_MODULE, TZCOM_DEV);
800 if (IS_ERR(driver_class)) {
801 rc = -ENOMEM;
802 PERR("class_create failed %d", rc);
803 goto unregister_chrdev_region;
804 }
805
806 class_dev = device_create(driver_class, NULL, tzcom_device_no, NULL,
807 TZCOM_DEV);
808 if (!class_dev) {
809 PERR("class_device_create failed %d", rc);
810 rc = -ENOMEM;
811 goto class_destroy;
812 }
813
814 cdev_init(&tzcom_cdev, &tzcom_fops);
815 tzcom_cdev.owner = THIS_MODULE;
816
817 rc = cdev_add(&tzcom_cdev, MKDEV(MAJOR(tzcom_device_no), 0), 1);
818 if (rc < 0) {
819 PERR("cdev_add failed %d", rc);
820 goto class_device_destroy;
821 }
822
823 sb_in_phys = pmem_kalloc(sb_in_length, PMEM_MEMTYPE_EBI1 |
824 PMEM_ALIGNMENT_4K);
825 if (IS_ERR((void *)sb_in_phys)) {
826 PERR("could not allocte in kernel pmem buffers for sb_in");
827 rc = -ENOMEM;
828 goto class_device_destroy;
829 }
830 PDEBUG("physical_addr for sb_in: 0x%x", sb_in_phys);
831
832 sb_in_virt = (u8 *) ioremap((unsigned long)sb_in_phys,
833 sb_in_length);
834 if (!sb_in_virt) {
835 PERR("Shared buffer IN allocation failed.");
836 rc = -ENOMEM;
837 goto class_device_destroy;
838 }
839 PDEBUG("sb_in virt address: %p, phys address: 0x%x",
840 sb_in_virt, tzcom_virt_to_phys(sb_in_virt));
841
842 sb_out_phys = pmem_kalloc(sb_out_length, PMEM_MEMTYPE_EBI1 |
843 PMEM_ALIGNMENT_4K);
844 if (IS_ERR((void *)sb_out_phys)) {
845 PERR("could not allocte in kernel pmem buffers for sb_out");
846 rc = -ENOMEM;
847 goto class_device_destroy;
848 }
849 PDEBUG("physical_addr for sb_out: 0x%x", sb_out_phys);
850
851 sb_out_virt = (u8 *) ioremap((unsigned long)sb_out_phys,
852 sb_out_length);
853 if (!sb_out_virt) {
854 PERR("Shared buffer OUT allocation failed.");
855 rc = -ENOMEM;
856 goto class_device_destroy;
857 }
858 PDEBUG("sb_out virt address: %p, phys address: 0x%x",
859 sb_out_virt, tzcom_virt_to_phys(sb_out_virt));
860
861 /* Initialized in tzcom_open */
862 pil = NULL;
863
864 return 0;
865
866class_device_destroy:
867 if (sb_in_virt)
868 iounmap(sb_in_virt);
869 if (sb_in_phys)
870 pmem_kfree(sb_in_phys);
871 if (sb_out_virt)
872 iounmap(sb_out_virt);
873 if (sb_out_phys)
874 pmem_kfree(sb_out_phys);
875 device_destroy(driver_class, tzcom_device_no);
876class_destroy:
877 class_destroy(driver_class);
878unregister_chrdev_region:
879 unregister_chrdev_region(tzcom_device_no, 1);
880 return rc;
881}
882
883static void __exit tzcom_exit(void)
884{
885 PDEBUG("Goodbye tzcom");
886 if (sb_in_virt)
887 iounmap(sb_in_virt);
888 if (sb_in_phys)
889 pmem_kfree(sb_in_phys);
890 if (sb_out_virt)
891 iounmap(sb_out_virt);
892 if (sb_out_phys)
893 pmem_kfree(sb_out_phys);
894 if (pil != NULL) {
895 pil_put("playrdy");
896 pil = NULL;
897 }
898 device_destroy(driver_class, tzcom_device_no);
899 class_destroy(driver_class);
900 unregister_chrdev_region(tzcom_device_no, 1);
901}
902
903
/* Module metadata and entry/exit registration. */
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Sachin Shah <sachins@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm TrustZone Communicator");
MODULE_VERSION("1.00");

module_init(tzcom_init);
module_exit(tzcom_exit);