/* Qualcomm TrustZone communicator driver
 *
 * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define KMSG_COMPONENT "TZCOM"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/android_pmem.h>
#include <linux/io.h>
#include <mach/scm.h>
#include <mach/peripheral-loader.h>
#include <linux/tzcom.h>
#include "tzcomi.h"

#define TZCOM_DEV "tzcom"

#define TZSCHEDULER_CMD_ID 1 /* CMD id of the trustzone scheduler */

#undef PDEBUG
#define PDEBUG(fmt, args...) pr_debug("%s(%i, %s): " fmt "\n", \
		__func__, current->pid, current->comm, ## args)

#undef PERR
#define PERR(fmt, args...) pr_err("%s(%i, %s): " fmt "\n", \
		__func__, current->pid, current->comm, ## args)

#undef PWARN
#define PWARN(fmt, args...) pr_warning("%s(%i, %s): " fmt "\n", \
		__func__, current->pid, current->comm, ## args)

static struct class *driver_class;
static dev_t tzcom_device_no;
static struct cdev tzcom_cdev;

static u8 *sb_in_virt;
static s32 sb_in_phys;
static size_t sb_in_length = 20 * SZ_1K;
static u8 *sb_out_virt;
static s32 sb_out_phys;
static size_t sb_out_length = 20 * SZ_1K;

static void *pil;

static atomic_t svc_instance_ctr = ATOMIC_INIT(0);
static DEFINE_MUTEX(sb_in_lock);
static DEFINE_MUTEX(sb_out_lock);
static DEFINE_MUTEX(send_cmd_lock);

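/*
 * One callback command queued for a registered service.  The entry is
 * allocated in tzcom_send_cmd() when the scheduler reports an incomplete
 * command; the callback payload is copied out of the output shared buffer
 * and stored immediately after the struct, callback.sb_out_cb_data_off
 * bytes past &callback.
 */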
struct tzcom_callback_list {
	struct list_head list;
	struct tzcom_callback callback;
};

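/*
 * One registered service instance (see tzcom_register_service()).  A service
 * claims a [cmd_id_low, cmd_id_high] range of callback command ids;
 * next_cmd_wq is woken, with next_cmd_flag set, whenever a callback command
 * may be pending for it.
 */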
struct tzcom_registered_svc_list {
	struct list_head list;
	struct tzcom_register_svc_op_req svc;
	wait_queue_head_t next_cmd_wq;
	int next_cmd_flag;
};

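/*
 * Per-open-file state: the pending callback queue, the services registered
 * through this file descriptor, and the wait queue tzcom_send_cmd() blocks
 * on until tzcom_cont_cmd() posts a response (signalled via cont_cmd_flag).
 */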
struct tzcom_data_t {
	struct list_head callback_list_head;
	struct mutex callback_list_lock;
	struct list_head registered_svc_list_head;
	spinlock_t registered_svc_list_lock;
	wait_queue_head_t cont_cmd_wq;
	int cont_cmd_flag;
	u32 handled_cmd_svc_instance_id;
};

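/* All requests are funnelled through the TrustZone scheduler SCM service. */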
static int tzcom_scm_call(const void *cmd_buf, size_t cmd_len,
		void *resp_buf, size_t resp_len)
{
	return scm_call(SCM_SVC_TZSCHEDULER, TZSCHEDULER_CMD_ID,
			cmd_buf, cmd_len, resp_buf, resp_len);
}

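/*
 * Address translation helpers for the two statically allocated shared
 * buffers: addresses inside sb_in/sb_out are translated against the pmem
 * physical base recorded at init time; anything else falls back to the
 * generic virt_to_phys()/phys_to_virt().
 */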
static s32 tzcom_virt_to_phys(u8 *virt)
{
	if (virt >= sb_in_virt &&
			virt < (sb_in_virt + sb_in_length)) {
		return sb_in_phys + (virt - sb_in_virt);
	} else if (virt >= sb_out_virt &&
			virt < (sb_out_virt + sb_out_length)) {
		return sb_out_phys + (virt - sb_out_virt);
	} else {
		return virt_to_phys(virt);
	}
}

static u8 *tzcom_phys_to_virt(s32 phys)
{
	if (phys >= sb_in_phys &&
			phys < (sb_in_phys + sb_in_length)) {
		return sb_in_virt + (phys - sb_in_phys);
	} else if (phys >= sb_out_phys &&
			phys < (sb_out_phys + sb_out_length)) {
		return sb_out_virt + (phys - sb_out_phys);
	} else {
		return phys_to_virt(phys);
	}
}

static int __tzcom_is_svc_unique(struct tzcom_data_t *data,
		struct tzcom_register_svc_op_req svc)
{
	struct tzcom_registered_svc_list *ptr;
	int unique = 1;
	unsigned long flags;

	spin_lock_irqsave(&data->registered_svc_list_lock, flags);
	list_for_each_entry(ptr, &data->registered_svc_list_head, list) {
		if (ptr->svc.svc_id == svc.svc_id) {
			PERR("Service id: %u is already registered",
					ptr->svc.svc_id);
			unique = 0;
			break;
		} else if (svc.cmd_id_low >= ptr->svc.cmd_id_low &&
				svc.cmd_id_low <= ptr->svc.cmd_id_high) {
			PERR("Cmd id low falls in the range of another "
					"registered service");
			unique = 0;
			break;
		} else if (svc.cmd_id_high >= ptr->svc.cmd_id_low &&
				svc.cmd_id_high <= ptr->svc.cmd_id_high) {
			PERR("Cmd id high falls in the range of another "
					"registered service");
			unique = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&data->registered_svc_list_lock, flags);
	return unique;
}

static int tzcom_register_service(struct tzcom_data_t *data, void __user *argp)
{
	int ret;
	unsigned long flags;
	struct tzcom_register_svc_op_req rcvd_svc;
	struct tzcom_registered_svc_list *new_entry;

	ret = copy_from_user(&rcvd_svc, argp, sizeof(rcvd_svc));
	if (ret) {
		PERR("copy_from_user failed");
		return ret;
	}

	PDEBUG("svc_id: %u, cmd_id_low: %u, cmd_id_high: %u",
			rcvd_svc.svc_id, rcvd_svc.cmd_id_low,
			rcvd_svc.cmd_id_high);
	if (!__tzcom_is_svc_unique(data, rcvd_svc)) {
		PERR("Provided service is not unique");
		return -EINVAL;
	}

	rcvd_svc.instance_id = atomic_inc_return(&svc_instance_ctr);

	ret = copy_to_user(argp, &rcvd_svc, sizeof(rcvd_svc));
	if (ret) {
		PERR("copy_to_user failed");
		return ret;
	}

	new_entry = kmalloc(sizeof(*new_entry), GFP_KERNEL);
	if (!new_entry) {
		PERR("kmalloc failed");
		return -ENOMEM;
	}
	memcpy(&new_entry->svc, &rcvd_svc, sizeof(rcvd_svc));
	new_entry->next_cmd_flag = 0;
	init_waitqueue_head(&new_entry->next_cmd_wq);

	spin_lock_irqsave(&data->registered_svc_list_lock, flags);
	list_add_tail(&new_entry->list, &data->registered_svc_list_head);
	spin_unlock_irqrestore(&data->registered_svc_list_lock, flags);

	return ret;
}

static int tzcom_unregister_service(struct tzcom_data_t *data,
		void __user *argp)
{
	int ret = 0;
	unsigned long flags;
	struct tzcom_unregister_svc_op_req req;
	struct tzcom_registered_svc_list *ptr, *next;

	ret = copy_from_user(&req, argp, sizeof(req));
	if (ret) {
		PERR("copy_from_user failed");
		return ret;
	}

	spin_lock_irqsave(&data->registered_svc_list_lock, flags);
	list_for_each_entry_safe(ptr, next, &data->registered_svc_list_head,
			list) {
		if (req.svc_id == ptr->svc.svc_id &&
				req.instance_id == ptr->svc.instance_id) {
			wake_up_all(&ptr->next_cmd_wq);
			list_del(&ptr->list);
			kfree(ptr);
			spin_unlock_irqrestore(&data->registered_svc_list_lock,
					flags);
			return 0;
		}
	}
	spin_unlock_irqrestore(&data->registered_svc_list_lock, flags);

	return -EINVAL;
}

/**
 *     +---------+                             +-----+       +-----------------+
 *     |  TZCOM  |                             | SCM |       | TZCOM_SCHEDULER |
 *     +----+----+                             +--+--+       +--------+--------+
 *          |                                     |                   |
 *          |             scm_call                |                   |
 *          |------------------------------------>|                   |
 *          |  cmd_buf = struct tzcom_command {   |                   |
 *          |                cmd_type,            |------------------>|
 * +--------+--------------- sb_in_cmd_addr,      |                   |
 * |        |                sb_in_cmd_len        |                   |
 * |        |              }                      |                   |
 * |        |  resp_buf = struct tzcom_response { |                   |
 * |        |                cmd_status,          |                   |
 * |        |   +----------- sb_in_rsp_addr,      |                   |
 * |        |   |            sb_in_rsp_len        |<------------------|
 * |        |   |          }
 * |        |   |            struct tzcom_callback {-------------+
 * |        |   |                uint32_t cmd_id;                |
 * |        |   |                uint32_t sb_out_cb_data_len;    |
 * |        |   +--------------+  uint32_t sb_out_cb_data_off;   |
 * |        |                  | }                               |
 * |           ________________|________________________________ |
 * |  +-----------------------+|  +----------------------+       |
 * +->| copy from req.cmd_buf |+->| copy to req.resp_buf |       |
 *    +-----------------------+   +----------------------+       |
 *     ________________________________________________________  |
 *                       INPUT SHARED BUFFER                     |
 * +--------------------------------------------------------------+
 * |  _____________________________________________________
 * |  +---------------------------------------------+
 * +->| cmd_id | data_len | data_off |    data...   |
 *    +---------------------------------------------+
 *    |<------------>| copy to next_cmd.req_buf
 *     _____________________________________________________
 *                     OUTPUT SHARED BUFFER
 */
static int tzcom_send_cmd(struct tzcom_data_t *data, void __user *argp)
{
	int ret = 0;
	unsigned long flags;
	u32 reqd_len_sb_in = 0;
	u32 reqd_len_sb_out = 0;
	struct tzcom_send_cmd_op_req req;
	struct tzcom_command cmd;
	struct tzcom_response resp;
	struct tzcom_callback *next_callback;
	void *cb_data = NULL;
	struct tzcom_callback_list *new_entry;
	struct tzcom_callback *cb;
	size_t new_entry_len = 0;
	struct tzcom_registered_svc_list *ptr_svc;

	ret = copy_from_user(&req, argp, sizeof(req));
	if (ret) {
		PERR("copy_from_user failed");
		return ret;
	}

	if (req.cmd_buf == NULL || req.resp_buf == NULL) {
		PERR("cmd buffer or response buffer is null");
		return -EINVAL;
	}

	if (req.cmd_len <= 0 || req.resp_len <= 0 ||
			req.cmd_len > sb_in_length || req.resp_len > sb_in_length) {
		PERR("cmd buffer length or "
				"response buffer length not valid");
		return -EINVAL;
	}
	PDEBUG("received cmd_req.req: 0x%p",
			req.cmd_buf);
	PDEBUG("received cmd_req.rsp size: %u, ptr: 0x%p",
			req.resp_len,
			req.resp_buf);

	reqd_len_sb_in = req.cmd_len + req.resp_len;
	if (reqd_len_sb_in > sb_in_length) {
		PDEBUG("Not enough memory to fit cmd_buf and "
				"resp_buf. Required: %u, Available: %zu",
				reqd_len_sb_in, sb_in_length);
		return -ENOMEM;
	}

	/* Copy req.cmd_buf to SB in and set req.resp_buf to SB in + cmd_len */
	mutex_lock(&sb_in_lock);
	PDEBUG("Before copying cmd_buf into sb_in");
	if (copy_from_user(sb_in_virt, (void __user *)req.cmd_buf,
			req.cmd_len)) {
		PERR("copy_from_user failed for cmd_buf");
		mutex_unlock(&sb_in_lock);
		return -EFAULT;
	}
	PDEBUG("After copying cmd_buf into sb_in");

	/* cmd_type will always be a new command here */
	cmd.cmd_type = TZ_SCHED_CMD_NEW;
	cmd.sb_in_cmd_addr = (u8 *) tzcom_virt_to_phys(sb_in_virt);
	cmd.sb_in_cmd_len = req.cmd_len;

	resp.cmd_status = TZ_SCHED_STATUS_INCOMPLETE;
	resp.sb_in_rsp_addr = (u8 *) tzcom_virt_to_phys(sb_in_virt +
			req.cmd_len);
	resp.sb_in_rsp_len = req.resp_len;

	PDEBUG("before call tzcom_scm_call, cmd_id: %u", req.cmd_id);
	PDEBUG("before call tzcom_scm_call, sizeof(cmd) = %zu", sizeof(cmd));

	ret = tzcom_scm_call((const void *) &cmd, sizeof(cmd),
			&resp, sizeof(resp));
	mutex_unlock(&sb_in_lock);

	if (ret) {
		PERR("tzcom_scm_call failed with err: %d", ret);
		return ret;
	}

	while (resp.cmd_status != TZ_SCHED_STATUS_COMPLETE) {
		/*
		 * If cmd is incomplete, get the callback cmd out from SB out
		 * and put it on the list
		 */
		PDEBUG("cmd_status is incomplete.");
		next_callback = (struct tzcom_callback *)sb_out_virt;

		mutex_lock(&sb_out_lock);
		reqd_len_sb_out = sizeof(*next_callback)
				+ next_callback->sb_out_cb_data_len;
		if (reqd_len_sb_out > sb_out_length ||
				reqd_len_sb_out < sizeof(*next_callback) ||
				next_callback->sb_out_cb_data_len > sb_out_length) {
			PERR("Incorrect callback data length."
					" Required: %u, Available: %zu, Min: %zu",
					reqd_len_sb_out, sb_out_length,
					sizeof(*next_callback));
			mutex_unlock(&sb_out_lock);
			return -ENOMEM;
		}

		/* Assumption is cb_data_off is sizeof(tzcom_callback) */
		new_entry_len = sizeof(*new_entry)
				+ next_callback->sb_out_cb_data_len;
		new_entry = kmalloc(new_entry_len, GFP_KERNEL);
		if (!new_entry) {
			PERR("kmalloc failed");
			mutex_unlock(&sb_out_lock);
			return -ENOMEM;
		}

		cb = &new_entry->callback;
		cb->cmd_id = next_callback->cmd_id;
		cb->sb_out_cb_data_len = next_callback->sb_out_cb_data_len;
		cb->sb_out_cb_data_off = sizeof(*cb);

		cb_data = (u8 *)next_callback
				+ next_callback->sb_out_cb_data_off;
		memcpy((u8 *)cb + cb->sb_out_cb_data_off, cb_data,
				next_callback->sb_out_cb_data_len);
		mutex_unlock(&sb_out_lock);

		mutex_lock(&data->callback_list_lock);
		list_add_tail(&new_entry->list, &data->callback_list_head);
		mutex_unlock(&data->callback_list_lock);

		/*
		 * We don't know which service can handle the command, so we
		 * wake up all blocking services and let them figure out if
		 * they can handle the given command.
		 */
		spin_lock_irqsave(&data->registered_svc_list_lock, flags);
		list_for_each_entry(ptr_svc,
				&data->registered_svc_list_head, list) {
			ptr_svc->next_cmd_flag = 1;
			wake_up_interruptible(&ptr_svc->next_cmd_wq);
		}
		spin_unlock_irqrestore(&data->registered_svc_list_lock,
				flags);

		PDEBUG("waking up next_cmd_wq and "
				"waiting for cont_cmd_wq");
		if (wait_event_interruptible(data->cont_cmd_wq,
				data->cont_cmd_flag != 0)) {
			PWARN("Interrupted: exiting send_cmd loop");
			return -ERESTARTSYS;
		}
		data->cont_cmd_flag = 0;
		cmd.cmd_type = TZ_SCHED_CMD_PENDING;
		mutex_lock(&sb_in_lock);
		ret = tzcom_scm_call((const void *) &cmd, sizeof(cmd), &resp,
				sizeof(resp));
		mutex_unlock(&sb_in_lock);
		if (ret) {
			PERR("tzcom_scm_call failed with err: %d", ret);
			return ret;
		}
	}

	mutex_lock(&sb_in_lock);
	resp.sb_in_rsp_addr = sb_in_virt + cmd.sb_in_cmd_len;
	resp.sb_in_rsp_len = req.resp_len;
	if (copy_to_user((void __user *)req.resp_buf, resp.sb_in_rsp_addr,
			resp.sb_in_rsp_len))
		ret = -EFAULT;
	/* Zero out memory for security purpose */
	memset(sb_in_virt, 0, reqd_len_sb_in);
	mutex_unlock(&sb_in_lock);
	if (ret) {
		PERR("copy_to_user failed for resp_buf");
		return ret;
	}

	PDEBUG("sending cmd_req.rsp "
			"size: %u, ptr: 0x%p", req.resp_len,
			req.resp_buf);
	ret = copy_to_user(argp, &req, sizeof(req));
	if (ret) {
		PDEBUG("copy_to_user failed");
		return ret;
	}

	return ret;
}

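/*
 * Look up a registered service by instance id.  Note that list_for_each_entry
 * leaves the cursor pointing at the list head when nothing matches, so
 * callers must only pass instance ids handed out by tzcom_register_service().
 */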
static struct tzcom_registered_svc_list *__tzcom_find_svc(
		struct tzcom_data_t *data,
		uint32_t instance_id)
{
	struct tzcom_registered_svc_list *entry;
	unsigned long flags;

	spin_lock_irqsave(&data->registered_svc_list_lock, flags);
	list_for_each_entry(entry,
			&data->registered_svc_list_head, list) {
		if (entry->svc.instance_id == instance_id)
			break;
	}
	spin_unlock_irqrestore(&data->registered_svc_list_lock, flags);

	return entry;
}

static int __tzcom_copy_cmd(struct tzcom_data_t *data,
		struct tzcom_next_cmd_op_req *req,
		struct tzcom_registered_svc_list *ptr_svc)
{
	int found = 0;
	int ret = -EAGAIN;
	struct tzcom_callback_list *entry, *next;
	struct tzcom_callback *cb;

	PDEBUG("In here");
	mutex_lock(&data->callback_list_lock);
	PDEBUG("Before looping through cmd and svc lists.");
	list_for_each_entry_safe(entry, next, &data->callback_list_head, list) {
		cb = &entry->callback;
		if (req->svc_id == ptr_svc->svc.svc_id &&
				req->instance_id == ptr_svc->svc.instance_id &&
				cb->cmd_id >= ptr_svc->svc.cmd_id_low &&
				cb->cmd_id <= ptr_svc->svc.cmd_id_high) {
			PDEBUG("Found matching entry");
			found = 1;
			if (cb->sb_out_cb_data_len <= req->req_len) {
				PDEBUG("copying cmd buffer %p to req "
						"buffer %p, length: %u",
						(u8 *)cb + cb->sb_out_cb_data_off,
						req->req_buf, cb->sb_out_cb_data_len);
				req->cmd_id = cb->cmd_id;
				ret = copy_to_user(req->req_buf,
						(u8 *)cb + cb->sb_out_cb_data_off,
						cb->sb_out_cb_data_len);
				if (ret) {
					PERR("copy_to_user failed");
					break;
				}
				list_del(&entry->list);
				kfree(entry);
				ret = 0;
			} else {
				PERR("callback data buffer is "
						"larger than provided buffer. "
						"Required: %u, Provided: %u",
						cb->sb_out_cb_data_len,
						req->req_len);
				ret = -ENOMEM;
			}
			break;
		}
	}
	PDEBUG("After looping through cmd and svc lists.");
	mutex_unlock(&data->callback_list_lock);
	return ret;
}

static int tzcom_read_next_cmd(struct tzcom_data_t *data, void __user *argp)
{
	int ret = 0;
	struct tzcom_next_cmd_op_req req;
	struct tzcom_registered_svc_list *this_svc;

	ret = copy_from_user(&req, argp, sizeof(req));
	if (ret) {
		PERR("copy_from_user failed");
		return ret;
	}

	if (req.instance_id > atomic_read(&svc_instance_ctr)) {
		PERR("Invalid instance_id for the request");
		return -EINVAL;
	}

	if (!req.req_buf || req.req_len == 0) {
		PERR("Invalid request buffer or buffer length");
		return -EINVAL;
	}

	PDEBUG("Before next_cmd loop");
	this_svc = __tzcom_find_svc(data, req.instance_id);

	while (1) {
		PDEBUG("Before wait_event next_cmd.");
		if (wait_event_interruptible(this_svc->next_cmd_wq,
				this_svc->next_cmd_flag != 0)) {
			PWARN("Interrupted: exiting wait_next_cmd loop");
			/* woken up for different reason */
			return -ERESTARTSYS;
		}
		PDEBUG("After wait_event next_cmd.");
		this_svc->next_cmd_flag = 0;

		ret = __tzcom_copy_cmd(data, &req, this_svc);
		if (ret == 0) {
			PDEBUG("Successfully found svc for cmd");
			data->handled_cmd_svc_instance_id = req.instance_id;
			break;
		} else if (ret == -ENOMEM) {
			PERR("Not enough memory");
			return ret;
		}
	}
	ret = copy_to_user(argp, &req, sizeof(req));
	if (ret) {
		PERR("copy_to_user failed");
		return ret;
	}
	PDEBUG("copy_to_user is done.");
	return ret;
}

static int tzcom_cont_cmd(struct tzcom_data_t *data, void __user *argp)
{
	int ret = 0;
	struct tzcom_cont_cmd_op_req req;

	ret = copy_from_user(&req, argp, sizeof(req));
	if (ret) {
		PERR("copy_from_user failed");
		return ret;
	}

	/*
	 * Only the svc instance that handled the cmd (in read_next_cmd method)
	 * can call continue cmd
	 */
	if (data->handled_cmd_svc_instance_id != req.instance_id) {
		PWARN("Only the service instance that handled the last "
				"callback can continue cmd. "
				"Expected: %u, Received: %u",
				data->handled_cmd_svc_instance_id,
				req.instance_id);
		return -EINVAL;
	}

	if (req.resp_buf) {
		if (req.resp_len > sb_out_length) {
			PERR("resp_len %u is larger than the shared buffer",
					req.resp_len);
			return -EINVAL;
		}
		mutex_lock(&sb_out_lock);
		if (copy_from_user(sb_out_virt, (void __user *)req.resp_buf,
				req.resp_len)) {
			PERR("copy_from_user failed for resp_buf");
			mutex_unlock(&sb_out_lock);
			return -EFAULT;
		}
		mutex_unlock(&sb_out_lock);
	}

	data->cont_cmd_flag = 1;
	wake_up_interruptible(&data->cont_cmd_wq);
	return ret;
}

static long tzcom_ioctl(struct file *file, unsigned cmd,
		unsigned long arg)
{
	int ret = 0;
	struct tzcom_data_t *tzcom_data = file->private_data;
	void __user *argp = (void __user *) arg;

	PDEBUG("enter tzcom_ioctl()");
	switch (cmd) {
	case TZCOM_IOCTL_REGISTER_SERVICE_REQ: {
		PDEBUG("ioctl register_service_req()");
		ret = tzcom_register_service(tzcom_data, argp);
		if (ret)
			PERR("failed tzcom_register_service: %d", ret);
		break;
	}
	case TZCOM_IOCTL_UNREGISTER_SERVICE_REQ: {
		PDEBUG("ioctl unregister_service_req()");
		ret = tzcom_unregister_service(tzcom_data, argp);
		if (ret)
			PERR("failed tzcom_unregister_service: %d", ret);
		break;
	}
	case TZCOM_IOCTL_SEND_CMD_REQ: {
		PDEBUG("ioctl send_cmd_req()");
		/* Only one client allowed here at a time */
		mutex_lock(&send_cmd_lock);
		ret = tzcom_send_cmd(tzcom_data, argp);
		mutex_unlock(&send_cmd_lock);
		if (ret)
			PERR("failed tzcom_send_cmd: %d", ret);
		break;
	}
	case TZCOM_IOCTL_READ_NEXT_CMD_REQ: {
		PDEBUG("ioctl read_next_cmd_req()");
		ret = tzcom_read_next_cmd(tzcom_data, argp);
		if (ret)
			PERR("failed tzcom_read_next: %d", ret);
		break;
	}
	case TZCOM_IOCTL_CONTINUE_CMD_REQ: {
		PDEBUG("ioctl continue_cmd_req()");
		ret = tzcom_cont_cmd(tzcom_data, argp);
		if (ret)
			PERR("failed tzcom_cont_cmd: %d", ret);
		break;
	}
	default:
		return -EINVAL;
	}
	return ret;
}

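/*
 * Illustrative user-space call flow (a sketch only; the ioctl numbers and
 * request structures are defined in <linux/tzcom.h>, and the device node name
 * depends on how udev names the "tzcom" device, typically /dev/tzcom):
 *
 *	int fd = open("/dev/tzcom", O_RDWR);
 *
 *	// Simple blocking command:
 *	struct tzcom_send_cmd_op_req send_req = {
 *		.cmd_buf = cmd, .cmd_len = cmd_len,
 *		.resp_buf = rsp, .resp_len = rsp_len,
 *	};
 *	ioctl(fd, TZCOM_IOCTL_SEND_CMD_REQ, &send_req);
 *
 *	// Callback service: register once, then loop reading callback
 *	// commands and posting their responses.
 *	struct tzcom_register_svc_op_req reg = {
 *		.svc_id = my_svc, .cmd_id_low = lo, .cmd_id_high = hi,
 *	};
 *	ioctl(fd, TZCOM_IOCTL_REGISTER_SERVICE_REQ, &reg); // fills instance_id
 *	for (;;) {
 *		struct tzcom_next_cmd_op_req next = {
 *			.svc_id = reg.svc_id, .instance_id = reg.instance_id,
 *			.req_buf = buf, .req_len = buf_len,
 *		};
 *		ioctl(fd, TZCOM_IOCTL_READ_NEXT_CMD_REQ, &next); // blocks
 *		// ...handle next.cmd_id..., then:
 *		struct tzcom_cont_cmd_op_req cont = {
 *			.instance_id = reg.instance_id,
 *			.resp_buf = out, .resp_len = out_len,
 *		};
 *		ioctl(fd, TZCOM_IOCTL_CONTINUE_CMD_REQ, &cont);
 *	}
 */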
static int tzcom_open(struct inode *inode, struct file *file)
{
	int ret;
	long pil_error;
	struct tz_pr_init_sb_req_s sb_out_init_req;
	struct tz_pr_init_sb_rsp_s sb_out_init_rsp;
	void *rsp_addr_virt;
	struct tzcom_command cmd;
	struct tzcom_response resp;
	struct tzcom_data_t *tzcom_data;

	PDEBUG("In here");
	if (pil == NULL) {
		pil = pil_get("playrdy");
		if (IS_ERR(pil)) {
			PERR("Playready PIL image load failed");
			pil_error = PTR_ERR(pil);
			pil = NULL;
			return pil_error;
		}
		PDEBUG("playrdy image loaded successfully");
	}

	sb_out_init_req.pr_cmd = TZ_SCHED_CMD_ID_INIT_SB_OUT;
	sb_out_init_req.sb_len = sb_out_length;
	sb_out_init_req.sb_ptr = tzcom_virt_to_phys(sb_out_virt);
	PDEBUG("sb_out_init_req { pr_cmd: %d, sb_len: %u, "
			"sb_ptr (phys): 0x%x }",
			sb_out_init_req.pr_cmd,
			sb_out_init_req.sb_len,
			sb_out_init_req.sb_ptr);

	mutex_lock(&sb_in_lock);
	PDEBUG("Before memcpy on sb_in");
	memcpy(sb_in_virt, &sb_out_init_req, sizeof(sb_out_init_req));
	PDEBUG("After memcpy on sb_in");

	/* It will always be a new cmd from this method */
	cmd.cmd_type = TZ_SCHED_CMD_NEW;
	cmd.sb_in_cmd_addr = (u8 *) tzcom_virt_to_phys(sb_in_virt);
	cmd.sb_in_cmd_len = sizeof(sb_out_init_req);
	PDEBUG("tzcom_command { cmd_type: %u, sb_in_cmd_addr: %p, "
			"sb_in_cmd_len: %u }",
			cmd.cmd_type, cmd.sb_in_cmd_addr, cmd.sb_in_cmd_len);

	resp.cmd_status = TZ_SCHED_STATUS_INCOMPLETE;

	PDEBUG("Before scm_call for sb_init");
	ret = tzcom_scm_call(&cmd, sizeof(cmd), &resp, sizeof(resp));
	if (ret) {
		PERR("tzcom_scm_call failed with err: %d", ret);
		mutex_unlock(&sb_in_lock);
		return ret;
	}
	PDEBUG("After scm_call for sb_init");

	PDEBUG("tzcom_response after scm cmd_status: %u", resp.cmd_status);
	if (resp.cmd_status == TZ_SCHED_STATUS_COMPLETE) {
		resp.sb_in_rsp_addr = (u8 *)cmd.sb_in_cmd_addr +
				cmd.sb_in_cmd_len;
		resp.sb_in_rsp_len = sizeof(sb_out_init_rsp);
		PDEBUG("tzcom_response sb_in_rsp_addr: %p, sb_in_rsp_len: %u",
				resp.sb_in_rsp_addr, resp.sb_in_rsp_len);
		rsp_addr_virt = tzcom_phys_to_virt((unsigned long)
				resp.sb_in_rsp_addr);
		PDEBUG("Received response phys: %p, virt: %p",
				resp.sb_in_rsp_addr, rsp_addr_virt);
		memcpy(&sb_out_init_rsp, rsp_addr_virt, resp.sb_in_rsp_len);
	} else {
		PERR("Error with SB initialization");
		mutex_unlock(&sb_in_lock);
		return -EPERM;
	}
	mutex_unlock(&sb_in_lock);

	PDEBUG("sb_out_init_rsp { pr_cmd: %d, ret: %d }",
			sb_out_init_rsp.pr_cmd, sb_out_init_rsp.ret);

	if (sb_out_init_rsp.ret) {
		PERR("sb_out_init_req failed: %d", sb_out_init_rsp.ret);
		return -EPERM;
	}

	tzcom_data = kmalloc(sizeof(*tzcom_data), GFP_KERNEL);
	if (!tzcom_data) {
		PERR("kmalloc failed");
		return -ENOMEM;
	}
	file->private_data = tzcom_data;

	INIT_LIST_HEAD(&tzcom_data->callback_list_head);
	mutex_init(&tzcom_data->callback_list_lock);

	INIT_LIST_HEAD(&tzcom_data->registered_svc_list_head);
	spin_lock_init(&tzcom_data->registered_svc_list_lock);

	init_waitqueue_head(&tzcom_data->cont_cmd_wq);
	tzcom_data->cont_cmd_flag = 0;
	tzcom_data->handled_cmd_svc_instance_id = 0;
	return 0;
}

static int tzcom_release(struct inode *inode, struct file *file)
{
	struct tzcom_data_t *tzcom_data = file->private_data;
	struct tzcom_callback_list *lcb, *ncb;
	struct tzcom_registered_svc_list *lsvc, *nsvc;

	PDEBUG("In here");

	wake_up_all(&tzcom_data->cont_cmd_wq);

	list_for_each_entry_safe(lcb, ncb,
			&tzcom_data->callback_list_head, list) {
		list_del(&lcb->list);
		kfree(lcb);
	}

	list_for_each_entry_safe(lsvc, nsvc,
			&tzcom_data->registered_svc_list_head, list) {
		wake_up_all(&lsvc->next_cmd_wq);
		list_del(&lsvc->list);
		kfree(lsvc);
	}

	kfree(tzcom_data);
	return 0;
}

static const struct file_operations tzcom_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = tzcom_ioctl,
	.open = tzcom_open,
	.release = tzcom_release
};

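/*
 * Module init: create the "tzcom" char device and carve the two 20 KB shared
 * buffers (command/response in, callbacks out) out of kernel pmem.  The
 * Playready PIL image itself is loaded lazily on first open().
 */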
static int __init tzcom_init(void)
{
	int rc;
	struct device *class_dev;

	PDEBUG("Hello tzcom");

	rc = alloc_chrdev_region(&tzcom_device_no, 0, 1, TZCOM_DEV);
	if (rc < 0) {
		PERR("alloc_chrdev_region failed %d", rc);
		return rc;
	}

	driver_class = class_create(THIS_MODULE, TZCOM_DEV);
	if (IS_ERR(driver_class)) {
		rc = -ENOMEM;
		PERR("class_create failed %d", rc);
		goto unregister_chrdev_region;
	}

	class_dev = device_create(driver_class, NULL, tzcom_device_no, NULL,
			TZCOM_DEV);
	if (IS_ERR(class_dev)) {
		rc = PTR_ERR(class_dev);
		PERR("device_create failed %d", rc);
		goto class_destroy;
	}

	cdev_init(&tzcom_cdev, &tzcom_fops);
	tzcom_cdev.owner = THIS_MODULE;

	rc = cdev_add(&tzcom_cdev, MKDEV(MAJOR(tzcom_device_no), 0), 1);
	if (rc < 0) {
		PERR("cdev_add failed %d", rc);
		goto class_device_destroy;
	}

	sb_in_phys = pmem_kalloc(sb_in_length, PMEM_MEMTYPE_EBI1 |
			PMEM_ALIGNMENT_4K);
	if (IS_ERR((void *)sb_in_phys)) {
		PERR("could not allocate in-kernel pmem buffers for sb_in");
		rc = -ENOMEM;
		goto class_device_destroy;
	}
	PDEBUG("physical_addr for sb_in: 0x%x", sb_in_phys);

	sb_in_virt = (u8 *) ioremap((unsigned long)sb_in_phys,
			sb_in_length);
	if (!sb_in_virt) {
		PERR("Shared buffer IN allocation failed.");
		rc = -ENOMEM;
		goto class_device_destroy;
	}
	PDEBUG("sb_in virt address: %p, phys address: 0x%x",
			sb_in_virt, tzcom_virt_to_phys(sb_in_virt));

	sb_out_phys = pmem_kalloc(sb_out_length, PMEM_MEMTYPE_EBI1 |
			PMEM_ALIGNMENT_4K);
	if (IS_ERR((void *)sb_out_phys)) {
		PERR("could not allocate in-kernel pmem buffers for sb_out");
		rc = -ENOMEM;
		goto class_device_destroy;
	}
	PDEBUG("physical_addr for sb_out: 0x%x", sb_out_phys);

	sb_out_virt = (u8 *) ioremap((unsigned long)sb_out_phys,
			sb_out_length);
	if (!sb_out_virt) {
		PERR("Shared buffer OUT allocation failed.");
		rc = -ENOMEM;
		goto class_device_destroy;
	}
	PDEBUG("sb_out virt address: %p, phys address: 0x%x",
			sb_out_virt, tzcom_virt_to_phys(sb_out_virt));

	/* Initialized in tzcom_open */
	pil = NULL;

	return 0;

class_device_destroy:
	if (sb_in_virt)
		iounmap(sb_in_virt);
	if (sb_in_phys)
		pmem_kfree(sb_in_phys);
	if (sb_out_virt)
		iounmap(sb_out_virt);
	if (sb_out_phys)
		pmem_kfree(sb_out_phys);
	device_destroy(driver_class, tzcom_device_no);
class_destroy:
	class_destroy(driver_class);
unregister_chrdev_region:
	unregister_chrdev_region(tzcom_device_no, 1);
	return rc;
}

static void __exit tzcom_exit(void)
{
	PDEBUG("Goodbye tzcom");
	if (sb_in_virt)
		iounmap(sb_in_virt);
	if (sb_in_phys)
		pmem_kfree(sb_in_phys);
	if (sb_out_virt)
		iounmap(sb_out_virt);
	if (sb_out_phys)
		pmem_kfree(sb_out_phys);
	if (pil != NULL) {
		pil_put("playrdy");
		pil = NULL;
	}
	device_destroy(driver_class, tzcom_device_no);
	class_destroy(driver_class);
	unregister_chrdev_region(tzcom_device_no, 1);
}

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Sachin Shah <sachins@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm TrustZone Communicator");
MODULE_VERSION("1.00");

module_init(tzcom_init);
module_exit(tzcom_exit);