blob: afd84588e957829bff65dff8dac23d188ad66757 [file] [log] [blame]
/* Qualcomm TrustZone communicator driver
 *
 * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
14
15#define KMSG_COMPONENT "TZCOM"
16#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/android_pmem.h>
30#include <linux/io.h>
31#include <mach/scm.h>
32#include <mach/peripheral-loader.h>
33#include <linux/tzcom.h>
34#include "tzcomi.h"
35
/* Name used for the char device, the device class, and the chrdev region */
#define TZCOM_DEV "tzcom"

#define TZSCHEDULER_CMD_ID 1 /* CMD id of the trustzone scheduler */

/* Debug/error printing helpers: prefix each message with the calling
 * function plus the current task's pid and comm for easier tracing. */
#undef PDEBUG
#define PDEBUG(fmt, args...) pr_debug("%s(%i, %s): " fmt "\n", \
		__func__, current->pid, current->comm, ## args)

#undef PERR
#define PERR(fmt, args...) pr_err("%s(%i, %s): " fmt "\n", \
		__func__, current->pid, current->comm, ## args)
47
48
static struct class *driver_class;
static dev_t tzcom_device_no;
static struct cdev tzcom_cdev;

/* Shared buffer IN: kernel places commands here, TZ writes responses */
static u8 *sb_in_virt;		/* ioremap'd kernel mapping of the buffer */
static s32 sb_in_phys;		/* physical address handed to TZ */
static size_t sb_in_length = 20 * SZ_1K;
/* Shared buffer OUT: TZ deposits callback commands for services here */
static u8 *sb_out_virt;
static s32 sb_out_phys;
static size_t sb_out_length = 20 * SZ_1K;

/* Handle of the "playrdy" peripheral image; loaded lazily on first open */
static void *pil;

/* Monotonically increasing instance id for each registered service */
static atomic_t svc_instance_ctr = ATOMIC_INIT(0);
static DEFINE_MUTEX(sb_in_lock);	/* serializes access to sb_in */
static DEFINE_MUTEX(sb_out_lock);	/* serializes access to sb_out */
static DEFINE_MUTEX(send_cmd_lock);	/* one SEND_CMD ioctl at a time */
66
/* One queued TZ callback command; the payload bytes are copied into the
 * same allocation directly after the struct (see tzcom_send_cmd). */
struct tzcom_callback_list {
	struct list_head list;
	struct tzcom_callback callback;
};

/* One userspace service registered on a given open file */
struct tzcom_registered_svc_list {
	struct list_head list;
	struct tzcom_register_svc_op_req svc;	/* svc id + cmd id range */
	wait_queue_head_t next_cmd_wq;	/* service sleeps here for work */
	int next_cmd_flag;		/* wake condition for next_cmd_wq */
};

/* Per-open-file driver state */
struct tzcom_data_t {
	struct list_head callback_list_head;	/* pending TZ callbacks */
	struct mutex callback_list_lock;
	struct list_head registered_svc_list_head;
	spinlock_t registered_svc_list_lock;
	wait_queue_head_t cont_cmd_wq;	/* send_cmd sleeps awaiting cont_cmd */
	int cont_cmd_flag;
	/* instance id of the service that handled the last callback;
	 * only that instance may issue CONTINUE_CMD */
	u32 handled_cmd_svc_instance_id;
};
88
/*
 * Forward a marshalled command buffer to the TrustZone scheduler service
 * through the SCM interface.  Returns the scm_call() status code.
 */
static int tzcom_scm_call(const void *cmd_buf, size_t cmd_len,
		void *resp_buf, size_t resp_len)
{
	return scm_call(SCM_SVC_TZSCHEDULER, TZSCHEDULER_CMD_ID,
			cmd_buf, cmd_len, resp_buf, resp_len);
}
95
96static s32 tzcom_virt_to_phys(u8 *virt)
97{
98 if (virt >= sb_in_virt &&
99 virt < (sb_in_virt + sb_in_length)) {
100 return sb_in_phys + (virt - sb_in_virt);
101 } else if (virt >= sb_out_virt &&
102 virt < (sb_out_virt + sb_out_length)) {
103 return sb_out_phys + (virt - sb_out_virt);
104 } else {
105 return virt_to_phys(virt);
106 }
107}
108
109static u8 *tzcom_phys_to_virt(s32 phys)
110{
111 if (phys >= sb_in_phys &&
112 phys < (sb_in_phys + sb_in_length)) {
113 return sb_in_virt + (phys - sb_in_phys);
114 } else if (phys >= sb_out_phys &&
115 phys < (sb_out_phys + sb_out_length)) {
116 return sb_out_virt + (phys - sb_out_phys);
117 } else {
118 return phys_to_virt(phys);
119 }
120}
121
122static int __tzcom_is_svc_unique(struct tzcom_data_t *data,
123 struct tzcom_register_svc_op_req svc)
124{
125 struct tzcom_registered_svc_list *ptr;
126 int unique = 1;
127 unsigned long flags;
128
129 spin_lock_irqsave(&data->registered_svc_list_lock, flags);
130 list_for_each_entry(ptr, &data->registered_svc_list_head, list) {
131 if (ptr->svc.svc_id == svc.svc_id) {
132 PERR("Service id: %u is already registered",
133 ptr->svc.svc_id);
134 unique = 0;
135 break;
136 } else if (svc.cmd_id_low >= ptr->svc.cmd_id_low &&
137 svc.cmd_id_low <= ptr->svc.cmd_id_high) {
138 PERR("Cmd id low falls in the range of another"
139 "registered service");
140 unique = 0;
141 break;
142 } else if (svc.cmd_id_high >= ptr->svc.cmd_id_low &&
143 svc.cmd_id_high <= ptr->svc.cmd_id_high) {
144 PERR("Cmd id high falls in the range of another"
145 "registered service");
146 unique = 0;
147 break;
148 }
149 }
150 spin_unlock_irqrestore(&data->registered_svc_list_lock, flags);
151 return unique;
152}
153
154static int tzcom_register_service(struct tzcom_data_t *data, void __user *argp)
155{
156 int ret;
157 unsigned long flags;
158 struct tzcom_register_svc_op_req rcvd_svc;
159 struct tzcom_registered_svc_list *new_entry;
160
161 ret = copy_from_user(&rcvd_svc, argp, sizeof(rcvd_svc));
162
163 if (ret) {
164 PDEBUG("copy_from_user failed");
165 return ret;
166 }
167
168 PDEBUG("svc_id: %u, cmd_id_low: %u, cmd_id_high: %u",
169 rcvd_svc.svc_id, rcvd_svc.cmd_id_low,
170 rcvd_svc.cmd_id_high);
171 if (!__tzcom_is_svc_unique(data, rcvd_svc)) {
172 PDEBUG("Provided service is not unique");
173 return -EINVAL;
174 }
175
176 rcvd_svc.instance_id = atomic_inc_return(&svc_instance_ctr);
177
178 ret = copy_to_user(argp, &rcvd_svc, sizeof(rcvd_svc));
179 if (ret) {
180 PDEBUG("copy_to_user failed");
181 return ret;
182 }
183
184 new_entry = kmalloc(sizeof(*new_entry), GFP_KERNEL);
185 if (!new_entry) {
186 pr_err("%s: kmalloc failed\n", __func__);
187 return -ENOMEM;
188 }
189 memcpy(&new_entry->svc, &rcvd_svc, sizeof(rcvd_svc));
190 new_entry->next_cmd_flag = 0;
191 init_waitqueue_head(&new_entry->next_cmd_wq);
192
193 spin_lock_irqsave(&data->registered_svc_list_lock, flags);
194 list_add_tail(&new_entry->list, &data->registered_svc_list_head);
195 spin_unlock_irqrestore(&data->registered_svc_list_lock, flags);
196
197
198 return ret;
199}
200
201static int tzcom_unregister_service(struct tzcom_data_t *data,
202 void __user *argp)
203{
204 int ret = 0;
205 unsigned long flags;
206 struct tzcom_unregister_svc_op_req req;
207 struct tzcom_registered_svc_list *ptr;
208 ret = copy_from_user(&req, argp, sizeof(req));
209 if (ret) {
210 PDEBUG("copy_from_user failed");
211 return ret;
212 }
213
214 spin_lock_irqsave(&data->registered_svc_list_lock, flags);
215 list_for_each_entry(ptr, &data->registered_svc_list_head, list) {
216 if (req.svc_id == ptr->svc.svc_id &&
217 req.instance_id == ptr->svc.instance_id) {
218 wake_up_all(&ptr->next_cmd_wq);
219 list_del(&ptr->list);
220 kfree(ptr);
221 spin_unlock_irqrestore(&data->registered_svc_list_lock,
222 flags);
223 return 0;
224 }
225 }
226 spin_unlock_irqrestore(&data->registered_svc_list_lock, flags);
227
228 return -EINVAL;
229}
230
231/**
232 * +---------+ +-----+ +-----------------+
233 * | TZCOM | | SCM | | TZCOM_SCHEDULER |
234 * +----+----+ +--+--+ +--------+--------+
235 * | | |
236 * | scm_call | |
237 * |------------------------------------->| |
238 * | cmd_buf = struct tzcom_command { | |
239 * | cmd_type, |------------------>|
240 * +------+------------- sb_in_cmd_addr, | |
241 * | | sb_in_cmd_len | |
242 * | | } | |
243 * | | resp_buf = struct tzcom_response { | |
244 * | cmd_status, | |
245 * | +---------- sb_in_rsp_addr, | |
246 * | | sb_in_rsp_len |<------------------|
247 * | | }
248 * | | struct tzcom_callback {---------+
249 * | | uint32_t cmd_id; |
250 * | | uint32_t sb_out_cb_data_len;|
251 * | +---------------+ uint32_t sb_out_cb_data_off;|
252 * | | } |
253 * | _________________________|_______________________________ |
254 * | +-----------------------+| +----------------------+ |
255 * +--->+ copy from req.cmd_buf |+>| copy to req.resp_buf | |
256 * +-----------------------+ +----------------------+ |
257 * _________________________________________________________ |
258 * INPUT SHARED BUFFER |
259 * +------------------------------------------------------------------------+
260 * | _________________________________________________________
261 * | +---------------------------------------------+
262 * +->| cmd_id | data_len | data_off | data... |
263 * +---------------------------------------------+
264 * |<------------>|copy to next_cmd.req_buf
265 * _________________________________________________________
266 * OUTPUT SHARED BUFFER
267 */
268static int tzcom_send_cmd(struct tzcom_data_t *data, void __user *argp)
269{
270 int ret = 0;
271 unsigned long flags;
272 u32 reqd_len_sb_in = 0;
273 u32 reqd_len_sb_out = 0;
274 struct tzcom_send_cmd_op_req req;
275 struct tzcom_command cmd;
276 struct tzcom_response resp;
277 struct tzcom_callback *next_callback;
278 void *cb_data = NULL;
279 struct tzcom_callback_list *new_entry;
280 struct tzcom_callback *cb;
281 size_t new_entry_len = 0;
282 struct tzcom_registered_svc_list *ptr_svc;
283
284 ret = copy_from_user(&req, argp, sizeof(req));
285 if (ret) {
286 PDEBUG("copy_from_user failed");
287 return ret;
288 }
289
290 if (req.cmd_buf == NULL || req.resp_buf == NULL) {
291 PDEBUG("cmd buffer or response buffer is null");
292 return -EINVAL;
293 }
294
295 if (req.cmd_len <= 0 || req.resp_len <= 0) {
296 PDEBUG("cmd buffer length or "
297 "response buffer length not valid");
298 return -EINVAL;
299 }
300 PDEBUG("received cmd_req.req: 0x%p",
301 req.cmd_buf);
302 PDEBUG("received cmd_req.rsp size: %u, ptr: 0x%p",
303 req.resp_len,
304 req.resp_buf);
305
306 reqd_len_sb_in = req.cmd_len + req.resp_len;
307 if (reqd_len_sb_in > sb_in_length) {
308 PDEBUG("Not enough memory to fit cmd_buf and "
309 "resp_buf. Required: %u, Available: %u",
310 reqd_len_sb_in, sb_in_length);
311 return -ENOMEM;
312 }
313
314 /* Copy req.cmd_buf to SB in and set req.resp_buf to SB in + cmd_len */
315 mutex_lock(&sb_in_lock);
316 PDEBUG("Before memcpy on sb_in");
317 memcpy(sb_in_virt, req.cmd_buf, req.cmd_len);
318 PDEBUG("After memcpy on sb_in");
319
320 /* cmd_type will always be a new here */
321 cmd.cmd_type = TZ_SCHED_CMD_NEW;
322 cmd.sb_in_cmd_addr = (u8 *) tzcom_virt_to_phys(sb_in_virt);
323 cmd.sb_in_cmd_len = req.cmd_len;
324
325 resp.cmd_status = TZ_SCHED_STATUS_INCOMPLETE;
326 resp.sb_in_rsp_addr = (u8 *) tzcom_virt_to_phys(sb_in_virt +
327 req.cmd_len);
328 resp.sb_in_rsp_len = req.resp_len;
329
330 PDEBUG("before call tzcom_scm_call, cmd_id = : %u", req.cmd_id);
331 PDEBUG("before call tzcom_scm_call, sizeof(cmd) = : %u", sizeof(cmd));
332
333 tzcom_scm_call((const void *) &cmd, sizeof(cmd), &resp, sizeof(resp));
334 mutex_unlock(&sb_in_lock);
335
336 while (resp.cmd_status != TZ_SCHED_STATUS_COMPLETE) {
337 /*
338 * If cmd is incomplete, get the callback cmd out from SB out
339 * and put it on the list
340 */
341 PDEBUG("cmd_status is incomplete.");
342 next_callback = (struct tzcom_callback *)sb_out_virt;
343
344 mutex_lock(&sb_out_lock);
345 reqd_len_sb_out = sizeof(*next_callback)
346 + next_callback->sb_out_cb_data_len;
347 if (reqd_len_sb_out > sb_out_length) {
348 PDEBUG("Not enough memory to"
349 " fit tzcom_callback buffer."
350 " Required: %u, Available: %u",
351 reqd_len_sb_out, sb_out_length);
352 mutex_unlock(&sb_out_lock);
353 return -ENOMEM;
354 }
355
356 /* Assumption is cb_data_off is sizeof(tzcom_callback) */
357 new_entry_len = sizeof(*new_entry)
358 + next_callback->sb_out_cb_data_len;
359 new_entry = kmalloc(new_entry_len, GFP_KERNEL);
360 if (!new_entry) {
361 PERR("kmalloc failed");
362 mutex_unlock(&sb_out_lock);
363 return -ENOMEM;
364 }
365
366 cb = &new_entry->callback;
367 cb->cmd_id = next_callback->cmd_id;
368 cb->sb_out_cb_data_len = next_callback->sb_out_cb_data_len;
369 cb->sb_out_cb_data_off = next_callback->sb_out_cb_data_off;
370
371 cb_data = (u8 *)next_callback
372 + next_callback->sb_out_cb_data_off;
373 memcpy((u8 *)cb + cb->sb_out_cb_data_off, cb_data,
374 next_callback->sb_out_cb_data_len);
375 mutex_unlock(&sb_out_lock);
376
377 mutex_lock(&data->callback_list_lock);
378 list_add_tail(&new_entry->list, &data->callback_list_head);
379 mutex_unlock(&data->callback_list_lock);
380
381 /*
382 * We don't know which service can handle the command. so we
383 * wake up all blocking services and let them figure out if
384 * they can handle the given command.
385 */
386 spin_lock_irqsave(&data->registered_svc_list_lock, flags);
387 list_for_each_entry(ptr_svc,
388 &data->registered_svc_list_head, list) {
389 ptr_svc->next_cmd_flag = 1;
390 wake_up_interruptible(&ptr_svc->next_cmd_wq);
391 }
392 spin_unlock_irqrestore(&data->registered_svc_list_lock,
393 flags);
394
395 PDEBUG("waking up next_cmd_wq and "
396 "waiting for cont_cmd_wq");
397 if (wait_event_interruptible(data->cont_cmd_wq,
398 data->cont_cmd_flag != 0)) {
399 PDEBUG("Interrupted: exiting send_cmd loop");
400 return -ERESTARTSYS;
401 }
402 data->cont_cmd_flag = 0;
403 cmd.cmd_type = TZ_SCHED_CMD_PENDING;
404 mutex_lock(&sb_in_lock);
405 tzcom_scm_call((const void *) &cmd, sizeof(cmd), &resp,
406 sizeof(resp));
407 mutex_unlock(&sb_in_lock);
408 }
409
410 mutex_lock(&sb_in_lock);
411 resp.sb_in_rsp_addr = sb_in_virt + cmd.sb_in_cmd_len;
412 resp.sb_in_rsp_len = req.resp_len;
Sachin Shahc3f8dd32011-06-17 11:39:10 -0700413 memcpy(req.resp_buf, resp.sb_in_rsp_addr, resp.sb_in_rsp_len);
414 /* Zero out memory for security purpose */
415 memset(sb_in_virt, 0, reqd_len_sb_in);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700416 mutex_unlock(&sb_in_lock);
417
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700418 PDEBUG("sending cmd_req.rsp "
419 "size: %u, ptr: 0x%p", req.resp_len,
420 req.resp_buf);
421 ret = copy_to_user(argp, &req, sizeof(req));
422 if (ret) {
423 PDEBUG("copy_to_user failed");
424 return ret;
425 }
426
427 return ret;
428}
429
430static struct tzcom_registered_svc_list *__tzcom_find_svc(
431 struct tzcom_data_t *data,
432 uint32_t instance_id)
433{
434 struct tzcom_registered_svc_list *entry;
435 unsigned long flags;
436
437 spin_lock_irqsave(&data->registered_svc_list_lock, flags);
438 list_for_each_entry(entry,
439 &data->registered_svc_list_head, list) {
440 if (entry->svc.instance_id == instance_id)
441 break;
442 }
443 spin_unlock_irqrestore(&data->registered_svc_list_lock, flags);
444
445 return entry;
446}
447
448static int __tzcom_copy_cmd(struct tzcom_data_t *data,
449 struct tzcom_next_cmd_op_req *req,
450 struct tzcom_registered_svc_list *ptr_svc)
451{
452 int found = 0;
453 int ret = -EAGAIN;
454 struct tzcom_callback_list *entry;
455 struct tzcom_callback *cb;
456
457 PDEBUG("In here");
458 mutex_lock(&data->callback_list_lock);
459 PDEBUG("Before looping through cmd and svc lists.");
460 list_for_each_entry(entry, &data->callback_list_head, list) {
461 cb = &entry->callback;
462 if (req->svc_id == ptr_svc->svc.svc_id &&
463 req->instance_id == ptr_svc->svc.instance_id &&
464 cb->cmd_id >= ptr_svc->svc.cmd_id_low &&
465 cb->cmd_id <= ptr_svc->svc.cmd_id_high) {
466 PDEBUG("Found matching entry");
467 found = 1;
468 if (cb->sb_out_cb_data_len <= req->req_len) {
469 PDEBUG("copying cmd buffer %p to req "
470 "buffer %p, length: %u",
471 (u8 *)cb + cb->sb_out_cb_data_off,
472 req->req_buf, cb->sb_out_cb_data_len);
473 req->cmd_id = cb->cmd_id;
474 ret = copy_to_user(req->req_buf,
475 (u8 *)cb + cb->sb_out_cb_data_off,
476 cb->sb_out_cb_data_len);
477 if (ret) {
478 PDEBUG("copy_to_user failed");
479 break;
480 }
481 list_del(&entry->list);
482 kfree(entry);
483 ret = 0;
484 } else {
485 PDEBUG("callback data buffer is "
486 "larger than provided buffer."
487 "Required: %u, Provided: %u",
488 cb->sb_out_cb_data_len,
489 req->req_len);
490 ret = -ENOMEM;
491 }
492 break;
493 }
494 }
495 PDEBUG("After looping through cmd and svc lists.");
496 mutex_unlock(&data->callback_list_lock);
497 return ret;
498}
499
500static int tzcom_read_next_cmd(struct tzcom_data_t *data, void __user *argp)
501{
502 int ret = 0;
503 struct tzcom_next_cmd_op_req req;
504 struct tzcom_registered_svc_list *this_svc;
505
506 ret = copy_from_user(&req, argp, sizeof(req));
507 if (ret) {
508 PDEBUG("copy_from_user failed");
509 return ret;
510 }
511
512 if (req.instance_id > atomic_read(&svc_instance_ctr)) {
513 PDEBUG("Invalid instance_id for the request");
514 return -EINVAL;
515 }
516
517 if (!req.req_buf || req.req_len == 0) {
518 PDEBUG("Invalid request buffer or buffer length");
519 return -EINVAL;
520 }
521
522 PDEBUG("Before next_cmd loop");
523 this_svc = __tzcom_find_svc(data, req.instance_id);
524
525 while (1) {
526 PDEBUG("Before wait_event next_cmd.");
527 if (wait_event_interruptible(this_svc->next_cmd_wq,
528 this_svc->next_cmd_flag != 0)) {
529 PDEBUG("Interrupted: exiting wait_next_cmd loop");
530 /* woken up for different reason */
531 return -ERESTARTSYS;
532 }
533 PDEBUG("After wait_event next_cmd.");
534 this_svc->next_cmd_flag = 0;
535
536 ret = __tzcom_copy_cmd(data, &req, this_svc);
537 if (ret == 0) {
538 PDEBUG("Successfully found svc for cmd");
539 data->handled_cmd_svc_instance_id = req.instance_id;
540 break;
541 } else if (ret == -ENOMEM) {
542 PDEBUG("Not enough memory");
543 return ret;
544 }
545 }
546 ret = copy_to_user(argp, &req, sizeof(req));
547 if (ret) {
548 PDEBUG("copy_to_user failed");
549 return ret;
550 }
551 PDEBUG("copy_to_user is done.");
552 return ret;
553}
554
555static int tzcom_cont_cmd(struct tzcom_data_t *data, void __user *argp)
556{
557 int ret = 0;
558 struct tzcom_cont_cmd_op_req req;
559 ret = copy_from_user(&req, argp, sizeof(req));
560 if (ret) {
561 PDEBUG("copy_from_user failed");
562 return ret;
563 }
564
565 /*
566 * Only the svc instance that handled the cmd (in read_next_cmd method)
567 * can call continue cmd
568 */
569 if (data->handled_cmd_svc_instance_id != req.instance_id) {
570 PDEBUG("Only the service instance that handled the last "
571 "callback can continue cmd. "
572 "Expected: %u, Received: %u",
573 data->handled_cmd_svc_instance_id,
574 req.instance_id);
575 return -EINVAL;
576 }
577
578 if (req.resp_buf) {
579 mutex_lock(&sb_out_lock);
580 memcpy(sb_out_virt, req.resp_buf, req.resp_len);
581 mutex_unlock(&sb_out_lock);
582 }
583
584 data->cont_cmd_flag = 1;
585 wake_up_interruptible(&data->cont_cmd_wq);
586 return ret;
587}
588
589static long tzcom_ioctl(struct file *file, unsigned cmd,
590 unsigned long arg)
591{
592 int ret = 0;
593 struct tzcom_data_t *tzcom_data = file->private_data;
594 void __user *argp = (void __user *) arg;
595 PDEBUG("enter tzcom_ioctl()");
596 switch (cmd) {
597 case TZCOM_IOCTL_REGISTER_SERVICE_REQ: {
598 PDEBUG("ioctl register_service_req()");
599 ret = tzcom_register_service(tzcom_data, argp);
600 if (ret)
601 PDEBUG("failed tzcom_register_service: %d", ret);
602 break;
603 }
604 case TZCOM_IOCTL_UNREGISTER_SERVICE_REQ: {
605 PDEBUG("ioctl unregister_service_req()");
606 ret = tzcom_unregister_service(tzcom_data, argp);
607 if (ret)
608 PDEBUG("failed tzcom_unregister_service: %d", ret);
609 break;
610 }
611 case TZCOM_IOCTL_SEND_CMD_REQ: {
612 PDEBUG("ioctl send_cmd_req()");
613 /* Only one client allowed here at a time */
614 mutex_lock(&send_cmd_lock);
615 ret = tzcom_send_cmd(tzcom_data, argp);
616 mutex_unlock(&send_cmd_lock);
617 if (ret)
618 PDEBUG("failed tzcom_send_cmd: %d", ret);
619 break;
620 }
621 case TZCOM_IOCTL_READ_NEXT_CMD_REQ: {
622 PDEBUG("ioctl read_next_cmd_req()");
623 ret = tzcom_read_next_cmd(tzcom_data, argp);
624 if (ret)
625 PDEBUG("failed tzcom_read_next: %d", ret);
626 break;
627 }
628 case TZCOM_IOCTL_CONTINUE_CMD_REQ: {
629 PDEBUG("ioctl continue_cmd_req()");
630 ret = tzcom_cont_cmd(tzcom_data, argp);
631 if (ret)
632 PDEBUG("failed tzcom_cont_cmd: %d", ret);
633 break;
634 }
635 default:
636 return -EINVAL;
637 }
638 return ret;
639}
640
641static int tzcom_open(struct inode *inode, struct file *file)
642{
643 long pil_error;
644 struct tz_pr_init_sb_req_s sb_out_init_req;
645 struct tz_pr_init_sb_rsp_s sb_out_init_rsp;
646 void *rsp_addr_virt;
647 struct tzcom_command cmd;
648 struct tzcom_response resp;
649 struct tzcom_data_t *tzcom_data;
650
651 PDEBUG("In here");
652 if (pil == NULL) {
653 pil = pil_get("playrdy");
654 if (IS_ERR(pil)) {
655 PERR("Playready PIL image load failed");
656 pil_error = PTR_ERR(pil);
657 pil = NULL;
658 return pil_error;
659 }
660 PDEBUG("playrdy image loaded successfully");
661 }
662
663 sb_out_init_req.pr_cmd = TZ_SCHED_CMD_ID_INIT_SB_OUT;
664 sb_out_init_req.sb_len = sb_out_length;
665 sb_out_init_req.sb_ptr = tzcom_virt_to_phys(sb_out_virt);
666 PDEBUG("sb_out_init_req { pr_cmd: %d, sb_len: %u, "
667 "sb_ptr (phys): 0x%x }",
668 sb_out_init_req.pr_cmd,
669 sb_out_init_req.sb_len,
670 sb_out_init_req.sb_ptr);
671
672 mutex_lock(&sb_in_lock);
673 PDEBUG("Before memcpy on sb_in");
674 memcpy(sb_in_virt, &sb_out_init_req, sizeof(sb_out_init_req));
675 PDEBUG("After memcpy on sb_in");
676
677 /* It will always be a new cmd from this method */
678 cmd.cmd_type = TZ_SCHED_CMD_NEW;
679 cmd.sb_in_cmd_addr = (u8 *) tzcom_virt_to_phys(sb_in_virt);
680 cmd.sb_in_cmd_len = sizeof(sb_out_init_req);
681 PDEBUG("tzcom_command { cmd_type: %u, sb_in_cmd_addr: %p, "
682 "sb_in_cmd_len: %u }",
683 cmd.cmd_type, cmd.sb_in_cmd_addr, cmd.sb_in_cmd_len);
684
Sachin Shahf3b54ab2011-07-20 12:03:26 -0700685 resp.cmd_status = TZ_SCHED_STATUS_INCOMPLETE;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700686
687 PDEBUG("Before scm_call for sb_init");
688 tzcom_scm_call(&cmd, sizeof(cmd), &resp, sizeof(resp));
689 PDEBUG("After scm_call for sb_init");
Sachin Shahf3b54ab2011-07-20 12:03:26 -0700690 PDEBUG("tzcom_response after scm cmd_status: %u", resp.cmd_status);
691 if (resp.cmd_status == TZ_SCHED_STATUS_COMPLETE) {
692 resp.sb_in_rsp_addr = (u8 *)cmd.sb_in_cmd_addr +
693 cmd.sb_in_cmd_len;
694 resp.sb_in_rsp_len = sizeof(sb_out_init_rsp);
695 PDEBUG("tzcom_response sb_in_rsp_addr: %p, sb_in_rsp_len: %u",
696 resp.sb_in_rsp_addr, resp.sb_in_rsp_len);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700697 rsp_addr_virt = tzcom_phys_to_virt((unsigned long)
698 resp.sb_in_rsp_addr);
699 PDEBUG("Received response phys: %p, virt: %p",
Sachin Shahf3b54ab2011-07-20 12:03:26 -0700700 resp.sb_in_rsp_addr, rsp_addr_virt);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700701 memcpy(&sb_out_init_rsp, rsp_addr_virt, resp.sb_in_rsp_len);
702 } else {
703 PERR("Error with SB initialization");
704 mutex_unlock(&sb_in_lock);
705 return -EPERM;
706 }
707 mutex_unlock(&sb_in_lock);
708
709 PDEBUG("sb_out_init_rsp { pr_cmd: %d, ret: %d }",
710 sb_out_init_rsp.pr_cmd, sb_out_init_rsp.ret);
711
712 if (sb_out_init_rsp.ret) {
713 PERR("sb_out_init_req failed: %d", sb_out_init_rsp.ret);
714 return -EPERM;
715 }
716
717 tzcom_data = kmalloc(sizeof(*tzcom_data), GFP_KERNEL);
718 if (!tzcom_data) {
719 PERR("kmalloc failed");
720 return -ENOMEM;
721 }
722 file->private_data = tzcom_data;
723
724 INIT_LIST_HEAD(&tzcom_data->callback_list_head);
725 mutex_init(&tzcom_data->callback_list_lock);
726
727 INIT_LIST_HEAD(&tzcom_data->registered_svc_list_head);
728 spin_lock_init(&tzcom_data->registered_svc_list_lock);
729
730 init_waitqueue_head(&tzcom_data->cont_cmd_wq);
731 tzcom_data->cont_cmd_flag = 0;
732 tzcom_data->handled_cmd_svc_instance_id = 0;
733 return 0;
734}
735
736static int tzcom_release(struct inode *inode, struct file *file)
737{
738 struct tzcom_data_t *tzcom_data = file->private_data;
739 struct tzcom_callback_list *lcb, *ncb;
740 struct tzcom_registered_svc_list *lsvc, *nsvc;
741 PDEBUG("In here");
742
743 wake_up_all(&tzcom_data->cont_cmd_wq);
744
745 list_for_each_entry_safe(lcb, ncb,
746 &tzcom_data->callback_list_head, list) {
747 list_del(&lcb->list);
748 kfree(lcb);
749 }
750
751 list_for_each_entry_safe(lsvc, nsvc,
752 &tzcom_data->registered_svc_list_head, list) {
753 wake_up_all(&lsvc->next_cmd_wq);
754 list_del(&lsvc->list);
755 kfree(lsvc);
756 }
757
758 kfree(tzcom_data);
759 return 0;
760}
761
/* file_operations for the /dev/tzcom character device */
static const struct file_operations tzcom_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = tzcom_ioctl,
	.open = tzcom_open,
	.release = tzcom_release
};
768
769static int __init tzcom_init(void)
770{
771 int rc;
772 struct device *class_dev;
773
774 PDEBUG("Hello tzcom");
775
776 rc = alloc_chrdev_region(&tzcom_device_no, 0, 1, TZCOM_DEV);
777 if (rc < 0) {
778 PERR("alloc_chrdev_region failed %d", rc);
779 return rc;
780 }
781
782 driver_class = class_create(THIS_MODULE, TZCOM_DEV);
783 if (IS_ERR(driver_class)) {
784 rc = -ENOMEM;
785 PERR("class_create failed %d", rc);
786 goto unregister_chrdev_region;
787 }
788
789 class_dev = device_create(driver_class, NULL, tzcom_device_no, NULL,
790 TZCOM_DEV);
791 if (!class_dev) {
792 PERR("class_device_create failed %d", rc);
793 rc = -ENOMEM;
794 goto class_destroy;
795 }
796
797 cdev_init(&tzcom_cdev, &tzcom_fops);
798 tzcom_cdev.owner = THIS_MODULE;
799
800 rc = cdev_add(&tzcom_cdev, MKDEV(MAJOR(tzcom_device_no), 0), 1);
801 if (rc < 0) {
802 PERR("cdev_add failed %d", rc);
803 goto class_device_destroy;
804 }
805
806 sb_in_phys = pmem_kalloc(sb_in_length, PMEM_MEMTYPE_EBI1 |
807 PMEM_ALIGNMENT_4K);
808 if (IS_ERR((void *)sb_in_phys)) {
809 PERR("could not allocte in kernel pmem buffers for sb_in");
810 rc = -ENOMEM;
811 goto class_device_destroy;
812 }
813 PDEBUG("physical_addr for sb_in: 0x%x", sb_in_phys);
814
815 sb_in_virt = (u8 *) ioremap((unsigned long)sb_in_phys,
816 sb_in_length);
817 if (!sb_in_virt) {
818 PERR("Shared buffer IN allocation failed.");
819 rc = -ENOMEM;
820 goto class_device_destroy;
821 }
822 PDEBUG("sb_in virt address: %p, phys address: 0x%x",
823 sb_in_virt, tzcom_virt_to_phys(sb_in_virt));
824
825 sb_out_phys = pmem_kalloc(sb_out_length, PMEM_MEMTYPE_EBI1 |
826 PMEM_ALIGNMENT_4K);
827 if (IS_ERR((void *)sb_out_phys)) {
828 PERR("could not allocte in kernel pmem buffers for sb_out");
829 rc = -ENOMEM;
830 goto class_device_destroy;
831 }
832 PDEBUG("physical_addr for sb_out: 0x%x", sb_out_phys);
833
834 sb_out_virt = (u8 *) ioremap((unsigned long)sb_out_phys,
835 sb_out_length);
836 if (!sb_out_virt) {
837 PERR("Shared buffer OUT allocation failed.");
838 rc = -ENOMEM;
839 goto class_device_destroy;
840 }
841 PDEBUG("sb_out virt address: %p, phys address: 0x%x",
842 sb_out_virt, tzcom_virt_to_phys(sb_out_virt));
843
844 /* Initialized in tzcom_open */
845 pil = NULL;
846
847 return 0;
848
849class_device_destroy:
850 if (sb_in_virt)
851 iounmap(sb_in_virt);
852 if (sb_in_phys)
853 pmem_kfree(sb_in_phys);
854 if (sb_out_virt)
855 iounmap(sb_out_virt);
856 if (sb_out_phys)
857 pmem_kfree(sb_out_phys);
858 device_destroy(driver_class, tzcom_device_no);
859class_destroy:
860 class_destroy(driver_class);
861unregister_chrdev_region:
862 unregister_chrdev_region(tzcom_device_no, 1);
863 return rc;
864}
865
/*
 * Module exit: release the shared buffers, drop the peripheral image if
 * tzcom_open ever loaded it, and tear down the char device in reverse
 * order of creation.
 */
static void __exit tzcom_exit(void)
{
	PDEBUG("Goodbye tzcom");
	if (sb_in_virt)
		iounmap(sb_in_virt);
	if (sb_in_phys)
		pmem_kfree(sb_in_phys);
	if (sb_out_virt)
		iounmap(sb_out_virt);
	if (sb_out_phys)
		pmem_kfree(sb_out_phys);
	/* pil is only non-NULL after a successful pil_get in tzcom_open */
	if (pil != NULL) {
		pil_put("playrdy");
		pil = NULL;
	}
	device_destroy(driver_class, tzcom_device_no);
	class_destroy(driver_class);
	unregister_chrdev_region(tzcom_device_no, 1);
}
885
886
887MODULE_LICENSE("GPL v2");
888MODULE_AUTHOR("Sachin Shah <sachins@codeaurora.org>");
889MODULE_DESCRIPTION("Qualcomm TrustZone Communicator");
890MODULE_VERSION("1.00");
891
892module_init(tzcom_init);
893module_exit(tzcom_exit);