blob: ef8f126abdacee5218a81a1cefd3a199f1e655fb [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Qualcomm TrustZone communicator driver
2 *
3 * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 and
7 * only version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#define KMSG_COMPONENT "TZCOM"
16#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/android_pmem.h>
30#include <linux/io.h>
31#include <mach/scm.h>
32#include <mach/peripheral-loader.h>
33#include <linux/tzcom.h>
34#include "tzcomi.h"
35
36#define TZCOM_DEV "tzcom"
37
38#define TZSCHEDULER_CMD_ID 1 /* CMD id of the trustzone scheduler */
39
40#undef PDEBUG
41#define PDEBUG(fmt, args...) pr_debug("%s(%i, %s): " fmt "\n", \
42 __func__, current->pid, current->comm, ## args)
43
44#undef PERR
45#define PERR(fmt, args...) pr_err("%s(%i, %s): " fmt "\n", \
46 __func__, current->pid, current->comm, ## args)
47
Sachin Shahf0b51cd2011-08-11 11:54:57 -070048#undef PWARN
49#define PWARN(fmt, args...) pr_warning("%s(%i, %s): " fmt "\n", \
50 __func__, current->pid, current->comm, ## args)
51
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070052
53static struct class *driver_class;
54static dev_t tzcom_device_no;
55static struct cdev tzcom_cdev;
56
57static u8 *sb_in_virt;
58static s32 sb_in_phys;
59static size_t sb_in_length = 20 * SZ_1K;
60static u8 *sb_out_virt;
61static s32 sb_out_phys;
62static size_t sb_out_length = 20 * SZ_1K;
63
64static void *pil;
65
66static atomic_t svc_instance_ctr = ATOMIC_INIT(0);
67static DEFINE_MUTEX(sb_in_lock);
68static DEFINE_MUTEX(sb_out_lock);
69static DEFINE_MUTEX(send_cmd_lock);
70
/*
 * List node for a callback command captured from the shared OUT buffer.
 * The entry is over-allocated (kmalloc of sizeof(*entry) + data_len in
 * tzcom_send_cmd) so the callback's payload is stored inline after it.
 */
struct tzcom_callback_list {
	struct list_head list;
	struct tzcom_callback callback;
};
75
/*
 * Per-registered-service bookkeeping: the registration request from
 * userspace plus the wait queue a service blocks on (in
 * tzcom_read_next_cmd) until a callback command is queued for it.
 */
struct tzcom_registered_svc_list {
	struct list_head list;
	struct tzcom_register_svc_op_req svc;
	wait_queue_head_t next_cmd_wq;	/* woken when next_cmd_flag is set */
	int next_cmd_flag;		/* wakeup condition for next_cmd_wq */
};
82
/* Per-open-file driver state, allocated in tzcom_open(). */
struct tzcom_data_t {
	struct list_head callback_list_head;	/* pending callback cmds */
	struct mutex callback_list_lock;
	struct list_head registered_svc_list_head;
	spinlock_t registered_svc_list_lock;
	wait_queue_head_t cont_cmd_wq;	/* send_cmd sleeps here for cont_cmd */
	int cont_cmd_flag;		/* wakeup condition for cont_cmd_wq */
	/* instance id that handled the last callback (set in read_next_cmd) */
	u32 handled_cmd_svc_instance_id;
};
92
93static int tzcom_scm_call(const void *cmd_buf, size_t cmd_len,
94 void *resp_buf, size_t resp_len)
95{
96 return scm_call(SCM_SVC_TZSCHEDULER, TZSCHEDULER_CMD_ID,
97 cmd_buf, cmd_len, resp_buf, resp_len);
98}
99
100static s32 tzcom_virt_to_phys(u8 *virt)
101{
102 if (virt >= sb_in_virt &&
103 virt < (sb_in_virt + sb_in_length)) {
104 return sb_in_phys + (virt - sb_in_virt);
105 } else if (virt >= sb_out_virt &&
106 virt < (sb_out_virt + sb_out_length)) {
107 return sb_out_phys + (virt - sb_out_virt);
108 } else {
109 return virt_to_phys(virt);
110 }
111}
112
113static u8 *tzcom_phys_to_virt(s32 phys)
114{
115 if (phys >= sb_in_phys &&
116 phys < (sb_in_phys + sb_in_length)) {
117 return sb_in_virt + (phys - sb_in_phys);
118 } else if (phys >= sb_out_phys &&
119 phys < (sb_out_phys + sb_out_length)) {
120 return sb_out_virt + (phys - sb_out_phys);
121 } else {
122 return phys_to_virt(phys);
123 }
124}
125
126static int __tzcom_is_svc_unique(struct tzcom_data_t *data,
127 struct tzcom_register_svc_op_req svc)
128{
129 struct tzcom_registered_svc_list *ptr;
130 int unique = 1;
131 unsigned long flags;
132
133 spin_lock_irqsave(&data->registered_svc_list_lock, flags);
134 list_for_each_entry(ptr, &data->registered_svc_list_head, list) {
135 if (ptr->svc.svc_id == svc.svc_id) {
136 PERR("Service id: %u is already registered",
137 ptr->svc.svc_id);
138 unique = 0;
139 break;
140 } else if (svc.cmd_id_low >= ptr->svc.cmd_id_low &&
141 svc.cmd_id_low <= ptr->svc.cmd_id_high) {
142 PERR("Cmd id low falls in the range of another"
143 "registered service");
144 unique = 0;
145 break;
146 } else if (svc.cmd_id_high >= ptr->svc.cmd_id_low &&
147 svc.cmd_id_high <= ptr->svc.cmd_id_high) {
148 PERR("Cmd id high falls in the range of another"
149 "registered service");
150 unique = 0;
151 break;
152 }
153 }
154 spin_unlock_irqrestore(&data->registered_svc_list_lock, flags);
155 return unique;
156}
157
158static int tzcom_register_service(struct tzcom_data_t *data, void __user *argp)
159{
160 int ret;
161 unsigned long flags;
162 struct tzcom_register_svc_op_req rcvd_svc;
163 struct tzcom_registered_svc_list *new_entry;
164
165 ret = copy_from_user(&rcvd_svc, argp, sizeof(rcvd_svc));
166
167 if (ret) {
Sachin Shahf0b51cd2011-08-11 11:54:57 -0700168 PERR("copy_from_user failed");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700169 return ret;
170 }
171
172 PDEBUG("svc_id: %u, cmd_id_low: %u, cmd_id_high: %u",
173 rcvd_svc.svc_id, rcvd_svc.cmd_id_low,
174 rcvd_svc.cmd_id_high);
175 if (!__tzcom_is_svc_unique(data, rcvd_svc)) {
Sachin Shahf0b51cd2011-08-11 11:54:57 -0700176 PERR("Provided service is not unique");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700177 return -EINVAL;
178 }
179
180 rcvd_svc.instance_id = atomic_inc_return(&svc_instance_ctr);
181
182 ret = copy_to_user(argp, &rcvd_svc, sizeof(rcvd_svc));
183 if (ret) {
Sachin Shahf0b51cd2011-08-11 11:54:57 -0700184 PERR("copy_to_user failed");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700185 return ret;
186 }
187
188 new_entry = kmalloc(sizeof(*new_entry), GFP_KERNEL);
189 if (!new_entry) {
Sachin Shahf0b51cd2011-08-11 11:54:57 -0700190 PERR("kmalloc failed");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700191 return -ENOMEM;
192 }
193 memcpy(&new_entry->svc, &rcvd_svc, sizeof(rcvd_svc));
194 new_entry->next_cmd_flag = 0;
195 init_waitqueue_head(&new_entry->next_cmd_wq);
196
197 spin_lock_irqsave(&data->registered_svc_list_lock, flags);
198 list_add_tail(&new_entry->list, &data->registered_svc_list_head);
199 spin_unlock_irqrestore(&data->registered_svc_list_lock, flags);
200
201
202 return ret;
203}
204
205static int tzcom_unregister_service(struct tzcom_data_t *data,
206 void __user *argp)
207{
208 int ret = 0;
209 unsigned long flags;
210 struct tzcom_unregister_svc_op_req req;
211 struct tzcom_registered_svc_list *ptr;
212 ret = copy_from_user(&req, argp, sizeof(req));
213 if (ret) {
Sachin Shahf0b51cd2011-08-11 11:54:57 -0700214 PERR("copy_from_user failed");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700215 return ret;
216 }
217
218 spin_lock_irqsave(&data->registered_svc_list_lock, flags);
219 list_for_each_entry(ptr, &data->registered_svc_list_head, list) {
220 if (req.svc_id == ptr->svc.svc_id &&
221 req.instance_id == ptr->svc.instance_id) {
222 wake_up_all(&ptr->next_cmd_wq);
223 list_del(&ptr->list);
224 kfree(ptr);
225 spin_unlock_irqrestore(&data->registered_svc_list_lock,
226 flags);
227 return 0;
228 }
229 }
230 spin_unlock_irqrestore(&data->registered_svc_list_lock, flags);
231
232 return -EINVAL;
233}
234
235/**
236 * +---------+ +-----+ +-----------------+
237 * | TZCOM | | SCM | | TZCOM_SCHEDULER |
238 * +----+----+ +--+--+ +--------+--------+
239 * | | |
240 * | scm_call | |
241 * |------------------------------------->| |
242 * | cmd_buf = struct tzcom_command { | |
243 * | cmd_type, |------------------>|
244 * +------+------------- sb_in_cmd_addr, | |
245 * | | sb_in_cmd_len | |
246 * | | } | |
247 * | | resp_buf = struct tzcom_response { | |
248 * | cmd_status, | |
249 * | +---------- sb_in_rsp_addr, | |
250 * | | sb_in_rsp_len |<------------------|
251 * | | }
252 * | | struct tzcom_callback {---------+
253 * | | uint32_t cmd_id; |
254 * | | uint32_t sb_out_cb_data_len;|
255 * | +---------------+ uint32_t sb_out_cb_data_off;|
256 * | | } |
257 * | _________________________|_______________________________ |
258 * | +-----------------------+| +----------------------+ |
259 * +--->+ copy from req.cmd_buf |+>| copy to req.resp_buf | |
260 * +-----------------------+ +----------------------+ |
261 * _________________________________________________________ |
262 * INPUT SHARED BUFFER |
263 * +------------------------------------------------------------------------+
264 * | _________________________________________________________
265 * | +---------------------------------------------+
266 * +->| cmd_id | data_len | data_off | data... |
267 * +---------------------------------------------+
268 * |<------------>|copy to next_cmd.req_buf
269 * _________________________________________________________
270 * OUTPUT SHARED BUFFER
271 */
272static int tzcom_send_cmd(struct tzcom_data_t *data, void __user *argp)
273{
274 int ret = 0;
275 unsigned long flags;
276 u32 reqd_len_sb_in = 0;
277 u32 reqd_len_sb_out = 0;
278 struct tzcom_send_cmd_op_req req;
279 struct tzcom_command cmd;
280 struct tzcom_response resp;
281 struct tzcom_callback *next_callback;
282 void *cb_data = NULL;
283 struct tzcom_callback_list *new_entry;
284 struct tzcom_callback *cb;
285 size_t new_entry_len = 0;
286 struct tzcom_registered_svc_list *ptr_svc;
287
288 ret = copy_from_user(&req, argp, sizeof(req));
289 if (ret) {
Sachin Shahf0b51cd2011-08-11 11:54:57 -0700290 PERR("copy_from_user failed");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700291 return ret;
292 }
293
294 if (req.cmd_buf == NULL || req.resp_buf == NULL) {
Sachin Shahf0b51cd2011-08-11 11:54:57 -0700295 PERR("cmd buffer or response buffer is null");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700296 return -EINVAL;
297 }
298
299 if (req.cmd_len <= 0 || req.resp_len <= 0) {
Sachin Shahf0b51cd2011-08-11 11:54:57 -0700300 PERR("cmd buffer length or "
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700301 "response buffer length not valid");
302 return -EINVAL;
303 }
304 PDEBUG("received cmd_req.req: 0x%p",
305 req.cmd_buf);
306 PDEBUG("received cmd_req.rsp size: %u, ptr: 0x%p",
307 req.resp_len,
308 req.resp_buf);
309
310 reqd_len_sb_in = req.cmd_len + req.resp_len;
311 if (reqd_len_sb_in > sb_in_length) {
312 PDEBUG("Not enough memory to fit cmd_buf and "
313 "resp_buf. Required: %u, Available: %u",
314 reqd_len_sb_in, sb_in_length);
315 return -ENOMEM;
316 }
317
318 /* Copy req.cmd_buf to SB in and set req.resp_buf to SB in + cmd_len */
319 mutex_lock(&sb_in_lock);
320 PDEBUG("Before memcpy on sb_in");
321 memcpy(sb_in_virt, req.cmd_buf, req.cmd_len);
322 PDEBUG("After memcpy on sb_in");
323
324 /* cmd_type will always be a new here */
325 cmd.cmd_type = TZ_SCHED_CMD_NEW;
326 cmd.sb_in_cmd_addr = (u8 *) tzcom_virt_to_phys(sb_in_virt);
327 cmd.sb_in_cmd_len = req.cmd_len;
328
329 resp.cmd_status = TZ_SCHED_STATUS_INCOMPLETE;
330 resp.sb_in_rsp_addr = (u8 *) tzcom_virt_to_phys(sb_in_virt +
331 req.cmd_len);
332 resp.sb_in_rsp_len = req.resp_len;
333
334 PDEBUG("before call tzcom_scm_call, cmd_id = : %u", req.cmd_id);
335 PDEBUG("before call tzcom_scm_call, sizeof(cmd) = : %u", sizeof(cmd));
336
337 tzcom_scm_call((const void *) &cmd, sizeof(cmd), &resp, sizeof(resp));
338 mutex_unlock(&sb_in_lock);
339
340 while (resp.cmd_status != TZ_SCHED_STATUS_COMPLETE) {
341 /*
342 * If cmd is incomplete, get the callback cmd out from SB out
343 * and put it on the list
344 */
345 PDEBUG("cmd_status is incomplete.");
346 next_callback = (struct tzcom_callback *)sb_out_virt;
347
348 mutex_lock(&sb_out_lock);
349 reqd_len_sb_out = sizeof(*next_callback)
350 + next_callback->sb_out_cb_data_len;
351 if (reqd_len_sb_out > sb_out_length) {
Sachin Shahf0b51cd2011-08-11 11:54:57 -0700352 PERR("Not enough memory to"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700353 " fit tzcom_callback buffer."
354 " Required: %u, Available: %u",
355 reqd_len_sb_out, sb_out_length);
356 mutex_unlock(&sb_out_lock);
357 return -ENOMEM;
358 }
359
360 /* Assumption is cb_data_off is sizeof(tzcom_callback) */
361 new_entry_len = sizeof(*new_entry)
362 + next_callback->sb_out_cb_data_len;
363 new_entry = kmalloc(new_entry_len, GFP_KERNEL);
364 if (!new_entry) {
365 PERR("kmalloc failed");
366 mutex_unlock(&sb_out_lock);
367 return -ENOMEM;
368 }
369
370 cb = &new_entry->callback;
371 cb->cmd_id = next_callback->cmd_id;
372 cb->sb_out_cb_data_len = next_callback->sb_out_cb_data_len;
373 cb->sb_out_cb_data_off = next_callback->sb_out_cb_data_off;
374
375 cb_data = (u8 *)next_callback
376 + next_callback->sb_out_cb_data_off;
377 memcpy((u8 *)cb + cb->sb_out_cb_data_off, cb_data,
378 next_callback->sb_out_cb_data_len);
379 mutex_unlock(&sb_out_lock);
380
381 mutex_lock(&data->callback_list_lock);
382 list_add_tail(&new_entry->list, &data->callback_list_head);
383 mutex_unlock(&data->callback_list_lock);
384
385 /*
386 * We don't know which service can handle the command. so we
387 * wake up all blocking services and let them figure out if
388 * they can handle the given command.
389 */
390 spin_lock_irqsave(&data->registered_svc_list_lock, flags);
391 list_for_each_entry(ptr_svc,
392 &data->registered_svc_list_head, list) {
393 ptr_svc->next_cmd_flag = 1;
394 wake_up_interruptible(&ptr_svc->next_cmd_wq);
395 }
396 spin_unlock_irqrestore(&data->registered_svc_list_lock,
397 flags);
398
399 PDEBUG("waking up next_cmd_wq and "
400 "waiting for cont_cmd_wq");
401 if (wait_event_interruptible(data->cont_cmd_wq,
402 data->cont_cmd_flag != 0)) {
Sachin Shahf0b51cd2011-08-11 11:54:57 -0700403 PWARN("Interrupted: exiting send_cmd loop");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700404 return -ERESTARTSYS;
405 }
406 data->cont_cmd_flag = 0;
407 cmd.cmd_type = TZ_SCHED_CMD_PENDING;
408 mutex_lock(&sb_in_lock);
409 tzcom_scm_call((const void *) &cmd, sizeof(cmd), &resp,
410 sizeof(resp));
411 mutex_unlock(&sb_in_lock);
412 }
413
414 mutex_lock(&sb_in_lock);
415 resp.sb_in_rsp_addr = sb_in_virt + cmd.sb_in_cmd_len;
416 resp.sb_in_rsp_len = req.resp_len;
Sachin Shahc3f8dd32011-06-17 11:39:10 -0700417 memcpy(req.resp_buf, resp.sb_in_rsp_addr, resp.sb_in_rsp_len);
418 /* Zero out memory for security purpose */
419 memset(sb_in_virt, 0, reqd_len_sb_in);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700420 mutex_unlock(&sb_in_lock);
421
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700422 PDEBUG("sending cmd_req.rsp "
423 "size: %u, ptr: 0x%p", req.resp_len,
424 req.resp_buf);
425 ret = copy_to_user(argp, &req, sizeof(req));
426 if (ret) {
427 PDEBUG("copy_to_user failed");
428 return ret;
429 }
430
431 return ret;
432}
433
434static struct tzcom_registered_svc_list *__tzcom_find_svc(
435 struct tzcom_data_t *data,
436 uint32_t instance_id)
437{
438 struct tzcom_registered_svc_list *entry;
439 unsigned long flags;
440
441 spin_lock_irqsave(&data->registered_svc_list_lock, flags);
442 list_for_each_entry(entry,
443 &data->registered_svc_list_head, list) {
444 if (entry->svc.instance_id == instance_id)
445 break;
446 }
447 spin_unlock_irqrestore(&data->registered_svc_list_lock, flags);
448
449 return entry;
450}
451
452static int __tzcom_copy_cmd(struct tzcom_data_t *data,
453 struct tzcom_next_cmd_op_req *req,
454 struct tzcom_registered_svc_list *ptr_svc)
455{
456 int found = 0;
457 int ret = -EAGAIN;
458 struct tzcom_callback_list *entry;
459 struct tzcom_callback *cb;
460
461 PDEBUG("In here");
462 mutex_lock(&data->callback_list_lock);
463 PDEBUG("Before looping through cmd and svc lists.");
464 list_for_each_entry(entry, &data->callback_list_head, list) {
465 cb = &entry->callback;
466 if (req->svc_id == ptr_svc->svc.svc_id &&
467 req->instance_id == ptr_svc->svc.instance_id &&
468 cb->cmd_id >= ptr_svc->svc.cmd_id_low &&
469 cb->cmd_id <= ptr_svc->svc.cmd_id_high) {
470 PDEBUG("Found matching entry");
471 found = 1;
472 if (cb->sb_out_cb_data_len <= req->req_len) {
473 PDEBUG("copying cmd buffer %p to req "
474 "buffer %p, length: %u",
475 (u8 *)cb + cb->sb_out_cb_data_off,
476 req->req_buf, cb->sb_out_cb_data_len);
477 req->cmd_id = cb->cmd_id;
478 ret = copy_to_user(req->req_buf,
479 (u8 *)cb + cb->sb_out_cb_data_off,
480 cb->sb_out_cb_data_len);
481 if (ret) {
Sachin Shahf0b51cd2011-08-11 11:54:57 -0700482 PERR("copy_to_user failed");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700483 break;
484 }
485 list_del(&entry->list);
486 kfree(entry);
487 ret = 0;
488 } else {
Sachin Shahf0b51cd2011-08-11 11:54:57 -0700489 PERR("callback data buffer is "
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700490 "larger than provided buffer."
491 "Required: %u, Provided: %u",
492 cb->sb_out_cb_data_len,
493 req->req_len);
494 ret = -ENOMEM;
495 }
496 break;
497 }
498 }
499 PDEBUG("After looping through cmd and svc lists.");
500 mutex_unlock(&data->callback_list_lock);
501 return ret;
502}
503
504static int tzcom_read_next_cmd(struct tzcom_data_t *data, void __user *argp)
505{
506 int ret = 0;
507 struct tzcom_next_cmd_op_req req;
508 struct tzcom_registered_svc_list *this_svc;
509
510 ret = copy_from_user(&req, argp, sizeof(req));
511 if (ret) {
Sachin Shahf0b51cd2011-08-11 11:54:57 -0700512 PERR("copy_from_user failed");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700513 return ret;
514 }
515
516 if (req.instance_id > atomic_read(&svc_instance_ctr)) {
Sachin Shahf0b51cd2011-08-11 11:54:57 -0700517 PERR("Invalid instance_id for the request");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700518 return -EINVAL;
519 }
520
521 if (!req.req_buf || req.req_len == 0) {
Sachin Shahf0b51cd2011-08-11 11:54:57 -0700522 PERR("Invalid request buffer or buffer length");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700523 return -EINVAL;
524 }
525
526 PDEBUG("Before next_cmd loop");
527 this_svc = __tzcom_find_svc(data, req.instance_id);
528
529 while (1) {
530 PDEBUG("Before wait_event next_cmd.");
531 if (wait_event_interruptible(this_svc->next_cmd_wq,
532 this_svc->next_cmd_flag != 0)) {
Sachin Shahf0b51cd2011-08-11 11:54:57 -0700533 PWARN("Interrupted: exiting wait_next_cmd loop");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700534 /* woken up for different reason */
535 return -ERESTARTSYS;
536 }
537 PDEBUG("After wait_event next_cmd.");
538 this_svc->next_cmd_flag = 0;
539
540 ret = __tzcom_copy_cmd(data, &req, this_svc);
541 if (ret == 0) {
542 PDEBUG("Successfully found svc for cmd");
543 data->handled_cmd_svc_instance_id = req.instance_id;
544 break;
545 } else if (ret == -ENOMEM) {
Sachin Shahf0b51cd2011-08-11 11:54:57 -0700546 PERR("Not enough memory");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700547 return ret;
548 }
549 }
550 ret = copy_to_user(argp, &req, sizeof(req));
551 if (ret) {
Sachin Shahf0b51cd2011-08-11 11:54:57 -0700552 PERR("copy_to_user failed");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700553 return ret;
554 }
555 PDEBUG("copy_to_user is done.");
556 return ret;
557}
558
559static int tzcom_cont_cmd(struct tzcom_data_t *data, void __user *argp)
560{
561 int ret = 0;
562 struct tzcom_cont_cmd_op_req req;
563 ret = copy_from_user(&req, argp, sizeof(req));
564 if (ret) {
Sachin Shahf0b51cd2011-08-11 11:54:57 -0700565 PERR("copy_from_user failed");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700566 return ret;
567 }
568
569 /*
570 * Only the svc instance that handled the cmd (in read_next_cmd method)
571 * can call continue cmd
572 */
573 if (data->handled_cmd_svc_instance_id != req.instance_id) {
Sachin Shahf0b51cd2011-08-11 11:54:57 -0700574 PWARN("Only the service instance that handled the last "
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700575 "callback can continue cmd. "
576 "Expected: %u, Received: %u",
577 data->handled_cmd_svc_instance_id,
578 req.instance_id);
579 return -EINVAL;
580 }
581
582 if (req.resp_buf) {
583 mutex_lock(&sb_out_lock);
584 memcpy(sb_out_virt, req.resp_buf, req.resp_len);
585 mutex_unlock(&sb_out_lock);
586 }
587
588 data->cont_cmd_flag = 1;
589 wake_up_interruptible(&data->cont_cmd_wq);
590 return ret;
591}
592
593static long tzcom_ioctl(struct file *file, unsigned cmd,
594 unsigned long arg)
595{
596 int ret = 0;
597 struct tzcom_data_t *tzcom_data = file->private_data;
598 void __user *argp = (void __user *) arg;
599 PDEBUG("enter tzcom_ioctl()");
600 switch (cmd) {
601 case TZCOM_IOCTL_REGISTER_SERVICE_REQ: {
602 PDEBUG("ioctl register_service_req()");
603 ret = tzcom_register_service(tzcom_data, argp);
604 if (ret)
Sachin Shahf0b51cd2011-08-11 11:54:57 -0700605 PERR("failed tzcom_register_service: %d", ret);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700606 break;
607 }
608 case TZCOM_IOCTL_UNREGISTER_SERVICE_REQ: {
609 PDEBUG("ioctl unregister_service_req()");
610 ret = tzcom_unregister_service(tzcom_data, argp);
611 if (ret)
Sachin Shahf0b51cd2011-08-11 11:54:57 -0700612 PERR("failed tzcom_unregister_service: %d", ret);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700613 break;
614 }
615 case TZCOM_IOCTL_SEND_CMD_REQ: {
616 PDEBUG("ioctl send_cmd_req()");
617 /* Only one client allowed here at a time */
618 mutex_lock(&send_cmd_lock);
619 ret = tzcom_send_cmd(tzcom_data, argp);
620 mutex_unlock(&send_cmd_lock);
621 if (ret)
Sachin Shahf0b51cd2011-08-11 11:54:57 -0700622 PERR("failed tzcom_send_cmd: %d", ret);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700623 break;
624 }
625 case TZCOM_IOCTL_READ_NEXT_CMD_REQ: {
626 PDEBUG("ioctl read_next_cmd_req()");
627 ret = tzcom_read_next_cmd(tzcom_data, argp);
628 if (ret)
Sachin Shahf0b51cd2011-08-11 11:54:57 -0700629 PERR("failed tzcom_read_next: %d", ret);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700630 break;
631 }
632 case TZCOM_IOCTL_CONTINUE_CMD_REQ: {
633 PDEBUG("ioctl continue_cmd_req()");
634 ret = tzcom_cont_cmd(tzcom_data, argp);
635 if (ret)
Sachin Shahf0b51cd2011-08-11 11:54:57 -0700636 PERR("failed tzcom_cont_cmd: %d", ret);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700637 break;
638 }
639 default:
640 return -EINVAL;
641 }
642 return ret;
643}
644
/*
 * Open handler for /dev/tzcom.  Loads the "playrdy" PIL image on the
 * first open, asks the TZ scheduler to initialize the shared OUT
 * buffer, and allocates the per-file tzcom_data_t state.
 *
 * Returns 0 on success, a PIL error code when image loading fails,
 * -EPERM when the scheduler rejects the SB init command, -ENOMEM on
 * allocation failure.
 */
static int tzcom_open(struct inode *inode, struct file *file)
{
	long pil_error;
	struct tz_pr_init_sb_req_s sb_out_init_req;
	struct tz_pr_init_sb_rsp_s sb_out_init_rsp;
	void *rsp_addr_virt;
	struct tzcom_command cmd;
	struct tzcom_response resp;
	struct tzcom_data_t *tzcom_data;

	PDEBUG("In here");
	/* Lazy one-time PIL load; pil stays NULL on failure so the next
	 * open retries.  NOTE(review): not serialized -- two concurrent
	 * first opens could race on pil; confirm open is serialized. */
	if (pil == NULL) {
		pil = pil_get("playrdy");
		if (IS_ERR(pil)) {
			PERR("Playready PIL image load failed");
			pil_error = PTR_ERR(pil);
			pil = NULL;
			return pil_error;
		}
		PDEBUG("playrdy image loaded successfully");
	}

	/* Tell the scheduler where the shared OUT buffer lives */
	sb_out_init_req.pr_cmd = TZ_SCHED_CMD_ID_INIT_SB_OUT;
	sb_out_init_req.sb_len = sb_out_length;
	sb_out_init_req.sb_ptr = tzcom_virt_to_phys(sb_out_virt);
	PDEBUG("sb_out_init_req { pr_cmd: %d, sb_len: %u, "
			"sb_ptr (phys): 0x%x }",
			sb_out_init_req.pr_cmd,
			sb_out_init_req.sb_len,
			sb_out_init_req.sb_ptr);

	/* The init request itself travels through the IN buffer */
	mutex_lock(&sb_in_lock);
	PDEBUG("Before memcpy on sb_in");
	memcpy(sb_in_virt, &sb_out_init_req, sizeof(sb_out_init_req));
	PDEBUG("After memcpy on sb_in");

	/* It will always be a new cmd from this method */
	cmd.cmd_type = TZ_SCHED_CMD_NEW;
	cmd.sb_in_cmd_addr = (u8 *) tzcom_virt_to_phys(sb_in_virt);
	cmd.sb_in_cmd_len = sizeof(sb_out_init_req);
	PDEBUG("tzcom_command { cmd_type: %u, sb_in_cmd_addr: %p, "
			"sb_in_cmd_len: %u }",
			cmd.cmd_type, cmd.sb_in_cmd_addr, cmd.sb_in_cmd_len);

	resp.cmd_status = TZ_SCHED_STATUS_INCOMPLETE;

	/* NOTE(review): tzcom_scm_call return value is ignored; a failed
	 * call leaves resp.cmd_status INCOMPLETE and we return -EPERM */
	PDEBUG("Before scm_call for sb_init");
	tzcom_scm_call(&cmd, sizeof(cmd), &resp, sizeof(resp));
	PDEBUG("After scm_call for sb_init");
	PDEBUG("tzcom_response after scm cmd_status: %u", resp.cmd_status);
	if (resp.cmd_status == TZ_SCHED_STATUS_COMPLETE) {
		/* Response is placed directly after the command in SB in */
		resp.sb_in_rsp_addr = (u8 *)cmd.sb_in_cmd_addr +
				cmd.sb_in_cmd_len;
		resp.sb_in_rsp_len = sizeof(sb_out_init_rsp);
		PDEBUG("tzcom_response sb_in_rsp_addr: %p, sb_in_rsp_len: %u",
				resp.sb_in_rsp_addr, resp.sb_in_rsp_len);
		rsp_addr_virt = tzcom_phys_to_virt((unsigned long)
				resp.sb_in_rsp_addr);
		PDEBUG("Received response phys: %p, virt: %p",
				resp.sb_in_rsp_addr, rsp_addr_virt);
		memcpy(&sb_out_init_rsp, rsp_addr_virt, resp.sb_in_rsp_len);
	} else {
		PERR("Error with SB initialization");
		mutex_unlock(&sb_in_lock);
		return -EPERM;
	}
	mutex_unlock(&sb_in_lock);

	PDEBUG("sb_out_init_rsp { pr_cmd: %d, ret: %d }",
			sb_out_init_rsp.pr_cmd, sb_out_init_rsp.ret);

	if (sb_out_init_rsp.ret) {
		PERR("sb_out_init_req failed: %d", sb_out_init_rsp.ret);
		return -EPERM;
	}

	/* Per-file state: callback queue, service list and wait queues */
	tzcom_data = kmalloc(sizeof(*tzcom_data), GFP_KERNEL);
	if (!tzcom_data) {
		PERR("kmalloc failed");
		return -ENOMEM;
	}
	file->private_data = tzcom_data;

	INIT_LIST_HEAD(&tzcom_data->callback_list_head);
	mutex_init(&tzcom_data->callback_list_lock);

	INIT_LIST_HEAD(&tzcom_data->registered_svc_list_head);
	spin_lock_init(&tzcom_data->registered_svc_list_lock);

	init_waitqueue_head(&tzcom_data->cont_cmd_wq);
	tzcom_data->cont_cmd_flag = 0;
	tzcom_data->handled_cmd_svc_instance_id = 0;
	return 0;
}
739
/*
 * Release handler for /dev/tzcom: wake any sleepers on the per-file
 * wait queues, then free all queued callbacks, all registered service
 * entries, and the per-file state itself.
 */
static int tzcom_release(struct inode *inode, struct file *file)
{
	struct tzcom_data_t *tzcom_data = file->private_data;
	struct tzcom_callback_list *lcb, *ncb;
	struct tzcom_registered_svc_list *lsvc, *nsvc;
	PDEBUG("In here");

	/* Unblock a thread sleeping in tzcom_send_cmd() */
	wake_up_all(&tzcom_data->cont_cmd_wq);

	list_for_each_entry_safe(lcb, ncb,
			&tzcom_data->callback_list_head, list) {
		list_del(&lcb->list);
		kfree(lcb);
	}

	list_for_each_entry_safe(lsvc, nsvc,
			&tzcom_data->registered_svc_list_head, list) {
		/* NOTE(review): woken waiters may still reference *lsvc
		 * before the kfree below -- confirm no race on close */
		wake_up_all(&lsvc->next_cmd_wq);
		list_del(&lsvc->list);
		kfree(lsvc);
	}

	kfree(tzcom_data);
	return 0;
}
765
/* File operations for the tzcom character device */
static const struct file_operations tzcom_fops = {
		.owner = THIS_MODULE,
		.unlocked_ioctl = tzcom_ioctl,
		.open = tzcom_open,
		.release = tzcom_release
};
772
773static int __init tzcom_init(void)
774{
775 int rc;
776 struct device *class_dev;
777
778 PDEBUG("Hello tzcom");
779
780 rc = alloc_chrdev_region(&tzcom_device_no, 0, 1, TZCOM_DEV);
781 if (rc < 0) {
782 PERR("alloc_chrdev_region failed %d", rc);
783 return rc;
784 }
785
786 driver_class = class_create(THIS_MODULE, TZCOM_DEV);
787 if (IS_ERR(driver_class)) {
788 rc = -ENOMEM;
789 PERR("class_create failed %d", rc);
790 goto unregister_chrdev_region;
791 }
792
793 class_dev = device_create(driver_class, NULL, tzcom_device_no, NULL,
794 TZCOM_DEV);
795 if (!class_dev) {
796 PERR("class_device_create failed %d", rc);
797 rc = -ENOMEM;
798 goto class_destroy;
799 }
800
801 cdev_init(&tzcom_cdev, &tzcom_fops);
802 tzcom_cdev.owner = THIS_MODULE;
803
804 rc = cdev_add(&tzcom_cdev, MKDEV(MAJOR(tzcom_device_no), 0), 1);
805 if (rc < 0) {
806 PERR("cdev_add failed %d", rc);
807 goto class_device_destroy;
808 }
809
810 sb_in_phys = pmem_kalloc(sb_in_length, PMEM_MEMTYPE_EBI1 |
811 PMEM_ALIGNMENT_4K);
812 if (IS_ERR((void *)sb_in_phys)) {
813 PERR("could not allocte in kernel pmem buffers for sb_in");
814 rc = -ENOMEM;
815 goto class_device_destroy;
816 }
817 PDEBUG("physical_addr for sb_in: 0x%x", sb_in_phys);
818
819 sb_in_virt = (u8 *) ioremap((unsigned long)sb_in_phys,
820 sb_in_length);
821 if (!sb_in_virt) {
822 PERR("Shared buffer IN allocation failed.");
823 rc = -ENOMEM;
824 goto class_device_destroy;
825 }
826 PDEBUG("sb_in virt address: %p, phys address: 0x%x",
827 sb_in_virt, tzcom_virt_to_phys(sb_in_virt));
828
829 sb_out_phys = pmem_kalloc(sb_out_length, PMEM_MEMTYPE_EBI1 |
830 PMEM_ALIGNMENT_4K);
831 if (IS_ERR((void *)sb_out_phys)) {
832 PERR("could not allocte in kernel pmem buffers for sb_out");
833 rc = -ENOMEM;
834 goto class_device_destroy;
835 }
836 PDEBUG("physical_addr for sb_out: 0x%x", sb_out_phys);
837
838 sb_out_virt = (u8 *) ioremap((unsigned long)sb_out_phys,
839 sb_out_length);
840 if (!sb_out_virt) {
841 PERR("Shared buffer OUT allocation failed.");
842 rc = -ENOMEM;
843 goto class_device_destroy;
844 }
845 PDEBUG("sb_out virt address: %p, phys address: 0x%x",
846 sb_out_virt, tzcom_virt_to_phys(sb_out_virt));
847
848 /* Initialized in tzcom_open */
849 pil = NULL;
850
851 return 0;
852
853class_device_destroy:
854 if (sb_in_virt)
855 iounmap(sb_in_virt);
856 if (sb_in_phys)
857 pmem_kfree(sb_in_phys);
858 if (sb_out_virt)
859 iounmap(sb_out_virt);
860 if (sb_out_phys)
861 pmem_kfree(sb_out_phys);
862 device_destroy(driver_class, tzcom_device_no);
863class_destroy:
864 class_destroy(driver_class);
865unregister_chrdev_region:
866 unregister_chrdev_region(tzcom_device_no, 1);
867 return rc;
868}
869
870static void __exit tzcom_exit(void)
871{
872 PDEBUG("Goodbye tzcom");
873 if (sb_in_virt)
874 iounmap(sb_in_virt);
875 if (sb_in_phys)
876 pmem_kfree(sb_in_phys);
877 if (sb_out_virt)
878 iounmap(sb_out_virt);
879 if (sb_out_phys)
880 pmem_kfree(sb_out_phys);
881 if (pil != NULL) {
882 pil_put("playrdy");
883 pil = NULL;
884 }
885 device_destroy(driver_class, tzcom_device_no);
886 class_destroy(driver_class);
887 unregister_chrdev_region(tzcom_device_no, 1);
888}
889
890
891MODULE_LICENSE("GPL v2");
892MODULE_AUTHOR("Sachin Shah <sachins@codeaurora.org>");
893MODULE_DESCRIPTION("Qualcomm TrustZone Communicator");
894MODULE_VERSION("1.00");
895
896module_init(tzcom_init);
897module_exit(tzcom_exit);