blob: dcc98a0077010ab29201ff7c66a305346d91782c [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Qualcomm CE device driver.
2 *
3 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 and
7 * only version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14#include <linux/mman.h>
15#include <linux/android_pmem.h>
16#include <linux/types.h>
17#include <linux/platform_device.h>
18#include <linux/dma-mapping.h>
19#include <linux/kernel.h>
20#include <linux/dmapool.h>
21#include <linux/interrupt.h>
22#include <linux/spinlock.h>
23#include <linux/init.h>
24#include <linux/module.h>
25#include <linux/fs.h>
26#include <linux/miscdevice.h>
27#include <linux/uaccess.h>
28#include <linux/debugfs.h>
29#include <linux/scatterlist.h>
30#include <linux/crypto.h>
31#include <crypto/hash.h>
32#include <linux/platform_data/qcom_crypto_device.h>
33#include <mach/scm.h>
Mona Hossain5c8ea1f2011-07-28 15:11:29 -070034#include <linux/qcedev.h>
35#include "qce.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070036
37
38#define CACHE_LINE_SIZE 32
39#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
40
/* standard initialization vector for SHA-1, source: FIPS 180-2 */
static uint8_t _std_init_vector_sha1_uint8[] = {
	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
	0xC3, 0xD2, 0xE1, 0xF0
};
/* standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint8_t _std_init_vector_sha256_uint8[] = {
	0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
	0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
	0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
};
53
/* Kind of crypto operation carried by a qcedev_async_req. */
enum qcedev_crypto_oper_type {
	QCEDEV_CRYPTO_OPER_CIPHER = 0,
	QCEDEV_CRYPTO_OPER_SHA = 1,
	QCEDEV_CRYPTO_OPER_LAST
};
59
Mona Hossain087c60b2011-07-20 10:34:57 -070060struct qcedev_handle;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070061
/* Cipher request handed to the qce layer; cookie points back to the
 * owning qcedev_handle (set in start_cipher_req()). */
struct qcedev_cipher_req {
	struct ablkcipher_request creq;
	void *cookie;
};

/* Hash request handed to the qce layer; cookie points back to the
 * owning qcedev_handle (set in start_sha_req()). */
struct qcedev_sha_req {
	struct ahash_request sreq;
	void *cookie;
};

/* Running hash state kept across SHA init/update/final calls. */
struct qcedev_sha_ctxt {
	/* counters returned by the engine between partial updates */
	uint32_t auth_data[4];
	/* intermediate (or, after final, resulting) digest */
	uint8_t digest[QCEDEV_MAX_SHA_DIGEST];
	uint32_t diglen;
	/* residue smaller than one hash block, carried to the next update */
	uint8_t trailing_buf[64];
	uint32_t trailing_buf_len;
	uint8_t first_blk;
	uint8_t last_blk;
	/* HMAC/CMAC key copied from userspace */
	uint8_t authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];
};
82
/*
 * One queued crypto operation.  op_type selects which member of each
 * union is valid.  Requests are queued on qcedev_control.ready_commands
 * and completed from req_done().
 */
struct qcedev_async_req {
	struct list_head list;
	struct completion complete;
	enum qcedev_crypto_oper_type op_type;
	union {
		struct qcedev_cipher_op_req cipher_op_req;
		struct qcedev_sha_op_req sha_op_req;
	};
	union{
		struct qcedev_cipher_req cipher_req;
		struct qcedev_sha_req sha_req;
	};
	struct qcedev_handle *handle;
	int err;
};
98
Mona Hossain650c22c2011-07-19 09:54:19 -070099static DEFINE_MUTEX(send_cmd_lock);
100
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700101/**********************************************************************
102 * Register ourselves as a misc device to be able to access the dev driver
103 * from userspace. */
104
105
106#define QCEDEV_DEV "qcedev"
107
/* Per-device driver state; one instance per entry of qce_dev[]. */
struct qcedev_control{

	/* CE features supported by platform */
	struct msm_ce_hw_support platform_support;

	/* outstanding shared-CE TZ locks (see qcedev_lock_ce()) */
	uint32_t ce_lock_count;
	/* CE features/algorithms supported by HW engine*/
	struct ce_hw_support ce_support;

	/* misc device */
	struct miscdevice miscdevice;

	/* qce handle */
	void *qce;

	/* platform device */
	struct platform_device *pdev;

	/* QCEDEV_MAGIC while the device is valid (sanity check) */
	unsigned magic;

	/* requests waiting for the engine; protected by lock */
	struct list_head ready_commands;
	/* request currently on the hardware; protected by lock */
	struct qcedev_async_req *active_command;
	spinlock_t lock;
	/* runs req_done() to complete/advance the queue */
	struct tasklet_struct done_tasklet;
};
133
/* Per-open-file state: the owning control device plus the running
 * SHA context for this file descriptor. */
struct qcedev_handle {
	/* qcedev control handle */
	struct qcedev_control *cntl;
	/* qce internal sha context*/
	struct qcedev_sha_ctxt sha_ctxt;
};
140
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700141/*-------------------------------------------------------------------------
142* Resource Locking Service
143* ------------------------------------------------------------------------*/
144#define QCEDEV_CMD_ID 1
145#define QCEDEV_CE_LOCK_CMD 1
146#define QCEDEV_CE_UNLOCK_CMD 0
147#define NUM_RETRY 1000
148#define CE_BUSY 55
149
/*
 * Issue a TZ/SCM call against the shared CE resource (lock or unlock).
 * Returns the scm_call() result, or 0 when SCM support is compiled out.
 */
static int qcedev_scm_cmd(int resource, int cmd, int *response)
{
#ifdef CONFIG_MSM_SCM

	struct {
		int resource;
		int cmd;
	} cmd_buf = {
		.resource = resource,
		.cmd = cmd,
	};

	return scm_call(SCM_SVC_TZ, QCEDEV_CMD_ID, &cmd_buf,
				sizeof(cmd_buf), response, sizeof(*response));

#else
	return 0;
#endif
}
169
/*
 * Drop one reference on the shared CE hardware lock taken via
 * qcedev_lock_ce().  The TZ unlock SCM call is only issued when this
 * is the last outstanding reference (ce_lock_count == 1); otherwise
 * only the count is decremented.  Serialized by send_cmd_lock.
 * Returns 0 on success, -EIO if the SCM call fails or the count is
 * already zero.
 */
static int qcedev_unlock_ce(struct qcedev_control *podev)
{
	int ret = 0;

	mutex_lock(&send_cmd_lock);
	if (podev->ce_lock_count == 1) {
		int response = 0;

		if (qcedev_scm_cmd(podev->platform_support.shared_ce_resource,
					QCEDEV_CE_UNLOCK_CMD, &response)) {
			pr_err("Failed to release CE lock\n");
			ret = -EIO;
		}
	}
	if (ret == 0) {
		if (podev->ce_lock_count)
			podev->ce_lock_count--;
		else {
			/* We should never be here */
			ret = -EIO;
			pr_err("CE hardware is already unlocked\n");
		}
	}
	mutex_unlock(&send_cmd_lock);

	return ret;
}
197
/*
 * Take one reference on the CE hardware lock shared with the secure
 * world.  The first locker issues the TZ lock SCM call, retrying up
 * to NUM_RETRY times while the engine reports busy; nested callers
 * only bump ce_lock_count.  Serialized by send_cmd_lock.
 * Returns 0 on success, -EUSERS if the engine stayed busy through all
 * retries, or -EINVAL on SCM failure.
 */
static int qcedev_lock_ce(struct qcedev_control *podev)
{
	int ret = 0;

	mutex_lock(&send_cmd_lock);
	if (podev->ce_lock_count == 0) {
		int response = -CE_BUSY;
		int i = 0;

		do {
			if (qcedev_scm_cmd(
				podev->platform_support.shared_ce_resource,
				QCEDEV_CE_LOCK_CMD, &response)) {
				response = -EINVAL;
				break;
			}
		} while ((response == -CE_BUSY) && (i++ < NUM_RETRY));

		if ((response == -CE_BUSY) && (i >= NUM_RETRY)) {
			ret = -EUSERS;
		} else {
			if (response < 0)
				ret = -EINVAL;
		}
	}
	if (ret == 0)
		podev->ce_lock_count++;
	mutex_unlock(&send_cmd_lock);
	return ret;
}
228
229#define QCEDEV_MAGIC 0x56434544 /* "qced" */
230
231static long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
232static int qcedev_open(struct inode *inode, struct file *file);
233static int qcedev_release(struct inode *inode, struct file *file);
234static int start_cipher_req(struct qcedev_control *podev);
Mona Hossain650c22c2011-07-19 09:54:19 -0700235static int start_sha_req(struct qcedev_control *podev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700236
/* file_operations for the "qce" misc device node. */
static const struct file_operations qcedev_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = qcedev_ioctl,
	.open = qcedev_open,
	.release = qcedev_release,
};

/* Misc devices registered by this driver (currently a single "qce"). */
static struct qcedev_control qce_dev[] = {
	{
		.miscdevice = {
			.minor = MISC_DYNAMIC_MINOR,
			.name = "qce",
			.fops = &qcedev_fops,
		},
		.magic = QCEDEV_MAGIC,
	},
};
254
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700255#define MAX_QCE_DEVICE ARRAY_SIZE(qce_dev)
256#define DEBUG_MAX_FNAME 16
257#define DEBUG_MAX_RW_BUF 1024
258
/* Per-device operation counters, indexed by pdev->id in _qcedev_stat[]
 * and updated from submit_req(). */
struct qcedev_stat {
	u32 qcedev_dec_success;
	u32 qcedev_dec_fail;
	u32 qcedev_enc_success;
	u32 qcedev_enc_fail;
	u32 qcedev_sha_success;
	u32 qcedev_sha_fail;
};
267
268static struct qcedev_stat _qcedev_stat[MAX_QCE_DEVICE];
269static struct dentry *_debug_dent;
270static char _debug_read_buf[DEBUG_MAX_RW_BUF];
271static int _debug_qcedev[MAX_QCE_DEVICE];
272
273static struct qcedev_control *qcedev_minor_to_control(unsigned n)
274{
275 int i;
276
277 for (i = 0; i < MAX_QCE_DEVICE; i++) {
278 if (qce_dev[i].miscdevice.minor == n)
279 return &qce_dev[i];
280 }
281 return NULL;
282}
283
284static int qcedev_open(struct inode *inode, struct file *file)
285{
Mona Hossain087c60b2011-07-20 10:34:57 -0700286 struct qcedev_handle *handle;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700287 struct qcedev_control *podev;
288
289 podev = qcedev_minor_to_control(MINOR(inode->i_rdev));
290 if (podev == NULL) {
Ramesh Masavarapuc1d2b682011-09-07 14:57:58 -0700291 pr_err("%s: no such device %d\n", __func__,
292 MINOR(inode->i_rdev));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700293 return -ENOENT;
294 }
295
Mona Hossain087c60b2011-07-20 10:34:57 -0700296 handle = kzalloc(sizeof(struct qcedev_handle), GFP_KERNEL);
297 if (handle == NULL) {
298 pr_err("Failed to allocate memory %ld\n",
299 PTR_ERR(handle));
300 return -ENOMEM;
301 }
302
303 handle->cntl = podev;
304 file->private_data = handle;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700305
306 return 0;
307}
308
/*
 * release() for the qce misc device: sanity-checks the control magic
 * and frees the per-open handle.
 */
static int qcedev_release(struct inode *inode, struct file *file)
{
	struct qcedev_control *podev;
	struct qcedev_handle *handle;

	handle = file->private_data;
	podev = handle->cntl;
	if (podev != NULL && podev->magic != QCEDEV_MAGIC) {
		pr_err("%s: invalid handle %p\n",
					__func__, podev);
	}
	/* kzfree scrubs the handle, which may hold an HMAC/CMAC key */
	kzfree(handle);
	file->private_data = NULL;

	return 0;
}
325
/*
 * Tasklet handler run after a qce request completes.  Completes the
 * finished request and starts the next request queued on
 * ready_commands.  If starting a queued request fails immediately,
 * that request is completed (with the err recorded by start_*_req())
 * and the next one is tried, looping via "again" until one starts or
 * the queue drains.
 */
static void req_done(unsigned long data)
{
	struct qcedev_control *podev = (struct qcedev_control *)data;
	struct qcedev_async_req *areq;
	unsigned long flags = 0;
	struct qcedev_async_req *new_req = NULL;
	int ret = 0;

	spin_lock_irqsave(&podev->lock, flags);
	areq = podev->active_command;
	podev->active_command = NULL;

again:
	if (!list_empty(&podev->ready_commands)) {
		new_req = container_of(podev->ready_commands.next,
						struct qcedev_async_req, list);
		list_del(&new_req->list);
		podev->active_command = new_req;
		new_req->err = 0;
		if (new_req->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
			ret = start_cipher_req(podev);
		else
			ret = start_sha_req(podev);
	}

	spin_unlock_irqrestore(&podev->lock, flags);

	/* complete() is called without the spinlock held */
	if (areq)
		complete(&areq->complete);

	if (new_req && ret) {
		complete(&new_req->complete);
		spin_lock_irqsave(&podev->lock, flags);
		podev->active_command = NULL;
		areq = NULL;
		ret = 0;
		new_req = NULL;
		goto again;
	}

	return;
}
368
/*
 * qce completion callback for SHA/HMAC/CMAC requests.  Copies the
 * intermediate digest and auth counters back into the owning handle's
 * sha_ctxt, then schedules the done tasklet to finish the request and
 * start the next queued one.
 */
static void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
	unsigned char *authdata, int ret)
{
	struct qcedev_sha_req *areq;
	struct qcedev_control *pdev;
	struct qcedev_handle *handle;

	uint32_t *auth32 = (uint32_t *)authdata;

	areq = (struct qcedev_sha_req *) cookie;
	handle = (struct qcedev_handle *) areq->cookie;
	pdev = handle->cntl;

	if (digest)
		/* NOTE(review): copies a fixed 32 bytes (SHA-256 size) even
		 * for SHA-1; confirm qce always supplies 32 bytes and that
		 * QCEDEV_MAX_SHA_DIGEST >= 32. */
		memcpy(&handle->sha_ctxt.digest[0], digest, 32);

	if (authdata) {
		handle->sha_ctxt.auth_data[0] = auth32[0];
		handle->sha_ctxt.auth_data[1] = auth32[1];
		handle->sha_ctxt.auth_data[2] = auth32[2];
		handle->sha_ctxt.auth_data[3] = auth32[3];
	}

	tasklet_schedule(&pdev->done_tasklet);
};
394
395
/*
 * qce completion callback for cipher requests.  Saves the IV returned
 * by the engine back into the active request's op structure (so
 * userspace sees the updated IV) and schedules the done tasklet.
 */
static void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
	unsigned char *iv, int ret)
{
	struct qcedev_cipher_req *areq;
	struct qcedev_handle *handle;
	struct qcedev_control *podev;
	struct qcedev_async_req *qcedev_areq;

	areq = (struct qcedev_cipher_req *) cookie;
	handle = (struct qcedev_handle *) areq->cookie;
	podev = handle->cntl;
	qcedev_areq = podev->active_command;

	if (iv)
		memcpy(&qcedev_areq->cipher_op_req.iv[0], iv,
					qcedev_areq->cipher_op_req.ivlen);
	tasklet_schedule(&podev->done_tasklet);
};
414
/*
 * Translate the active command's cipher_op_req into a qce_req and
 * submit it to the qce driver.  Called with podev->lock held (from
 * submit_req()/req_done()).  Sets qcedev_areq->err to -ENXIO on
 * submission failure, 0 otherwise, and returns the qce result.
 */
static int start_cipher_req(struct qcedev_control *podev)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_req creq;
	int ret = 0;

	/* start the command on the podev->active_command */
	qcedev_areq = podev->active_command;

	qcedev_areq->cipher_req.cookie = qcedev_areq->handle;
	creq.use_pmem = qcedev_areq->cipher_op_req.use_pmem;
	if (qcedev_areq->cipher_op_req.use_pmem == QCEDEV_USE_PMEM)
		creq.pmem = &qcedev_areq->cipher_op_req.pmem;
	else
		creq.pmem = NULL;

	switch (qcedev_areq->cipher_op_req.alg) {
	case QCEDEV_ALG_DES:
		creq.alg = CIPHER_ALG_DES;
		break;
	case QCEDEV_ALG_3DES:
		creq.alg = CIPHER_ALG_3DES;
		break;
	case QCEDEV_ALG_AES:
		creq.alg = CIPHER_ALG_AES;
		break;
	default:
		/* NOTE(review): an unknown alg leaves creq.alg unset —
		 * presumably rejected earlier at the ioctl layer; confirm */
		break;
	};

	switch (qcedev_areq->cipher_op_req.mode) {
	case QCEDEV_AES_MODE_CBC:
	case QCEDEV_DES_MODE_CBC:
		creq.mode = QCE_MODE_CBC;
		break;
	case QCEDEV_AES_MODE_ECB:
	case QCEDEV_DES_MODE_ECB:
		creq.mode = QCE_MODE_ECB;
		break;
	case QCEDEV_AES_MODE_CTR:
		creq.mode = QCE_MODE_CTR;
		break;
	case QCEDEV_AES_MODE_XTS:
		creq.mode = QCE_MODE_XTS;
		break;
	default:
		break;
	};

	/* AES-CTR is symmetric: the engine always runs in encrypt mode */
	if ((creq.alg == CIPHER_ALG_AES) &&
		(creq.mode == QCE_MODE_CTR)) {
		creq.dir = QCE_ENCRYPT;
	} else {
		if (QCEDEV_OPER_ENC == qcedev_areq->cipher_op_req.op)
			creq.dir = QCE_ENCRYPT;
		else
			creq.dir = QCE_DECRYPT;
	}

	creq.iv = &qcedev_areq->cipher_op_req.iv[0];
	creq.ivsize = qcedev_areq->cipher_op_req.ivlen;

	creq.enckey = &qcedev_areq->cipher_op_req.enckey[0];
	creq.encklen = qcedev_areq->cipher_op_req.encklen;

	creq.cryptlen = qcedev_areq->cipher_op_req.data_len;

	if (qcedev_areq->cipher_op_req.encklen == 0) {
		if ((qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC_NO_KEY)
			|| (qcedev_areq->cipher_op_req.op ==
				QCEDEV_OPER_DEC_NO_KEY))
			creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
		else {
			int i;

			/* a zero-length key is only valid as a request for
			 * the hardware key: the key bytes must be all zero
			 * and the platform must support HW keys */
			for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
				if (qcedev_areq->cipher_op_req.enckey[i] != 0)
					break;
			}

			if ((podev->platform_support.hw_key_support == 1) &&
						(i == QCEDEV_MAX_KEY_SIZE))
				creq.op = QCE_REQ_ABLK_CIPHER;
			else {
				ret = -EINVAL;
				goto unsupported;
			}
		}
	} else {
		creq.op = QCE_REQ_ABLK_CIPHER;
	}

	creq.qce_cb = qcedev_cipher_req_cb;
	creq.areq = (void *)&qcedev_areq->cipher_req;

	ret = qce_ablk_cipher_req(podev->qce, &creq);
unsupported:
	if (ret)
		qcedev_areq->err = -ENXIO;
	else
		qcedev_areq->err = 0;
	return ret;
};
518
/*
 * Translate the active command's sha_op_req into a qce_sha_req and
 * submit it.  For non-CMAC algorithms the running state from the
 * handle's sha_ctxt (auth counters, intermediate digest, first/last
 * block flags) is carried into the request; qcedev_sha_req_cb()
 * writes the updated state back.  Sets qcedev_areq->err to -ENXIO on
 * submission failure, 0 otherwise, and returns the qce result.
 */
static int start_sha_req(struct qcedev_control *podev)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_sha_req sreq;
	int ret = 0;
	struct qcedev_handle *handle;

	/* start the command on the podev->active_command */
	qcedev_areq = podev->active_command;
	handle = qcedev_areq->handle;

	switch (qcedev_areq->sha_op_req.alg) {
	case QCEDEV_ALG_SHA1:
		sreq.alg = QCE_HASH_SHA1;
		break;
	case QCEDEV_ALG_SHA256:
		sreq.alg = QCE_HASH_SHA256;
		break;
	case QCEDEV_ALG_SHA1_HMAC:
		/* fall back to plain SHA-1 when the HW lacks HMAC support */
		if (podev->ce_support.sha_hmac) {
			sreq.alg = QCE_HASH_SHA1_HMAC;
			sreq.authkey = &handle->sha_ctxt.authkey[0];

		} else {
			sreq.alg = QCE_HASH_SHA1;
			sreq.authkey = NULL;
		}
		break;
	case QCEDEV_ALG_SHA256_HMAC:
		/* fall back to plain SHA-256 when the HW lacks HMAC support */
		if (podev->ce_support.sha_hmac) {
			sreq.alg = QCE_HASH_SHA256_HMAC;
			sreq.authkey = &handle->sha_ctxt.authkey[0];

		} else {
			sreq.alg = QCE_HASH_SHA256;
			sreq.authkey = NULL;
		}
		break;
	case QCEDEV_ALG_AES_CMAC:
		sreq.alg = QCE_HASH_AES_CMAC;
		sreq.authkey = &handle->sha_ctxt.authkey[0];
		sreq.authklen = qcedev_areq->sha_op_req.authklen;
		break;
	default:
		break;
	};

	qcedev_areq->sha_req.cookie = handle;

	sreq.qce_cb = qcedev_sha_req_cb;
	if (qcedev_areq->sha_op_req.alg != QCEDEV_ALG_AES_CMAC) {
		sreq.auth_data[0] = handle->sha_ctxt.auth_data[0];
		sreq.auth_data[1] = handle->sha_ctxt.auth_data[1];
		sreq.auth_data[2] = handle->sha_ctxt.auth_data[2];
		sreq.auth_data[3] = handle->sha_ctxt.auth_data[3];
		sreq.digest = &handle->sha_ctxt.digest[0];
		sreq.first_blk = handle->sha_ctxt.first_blk;
		sreq.last_blk = handle->sha_ctxt.last_blk;
	}
	sreq.size = qcedev_areq->sha_req.sreq.nbytes;
	sreq.src = qcedev_areq->sha_req.sreq.src;
	sreq.areq = (void *)&qcedev_areq->sha_req;

	ret = qce_process_sha_req(podev->qce, &sreq);

	if (ret)
		qcedev_areq->err = -ENXIO;
	else
		qcedev_areq->err = 0;
	return ret;
};
590
/*
 * Queue an async request, wait for completion, and account the result.
 * When the platform shares the CE with the secure world, the TZ lock
 * is held around the whole operation.  The request starts immediately
 * if the engine is idle, otherwise it is queued and later started by
 * req_done().  Returns the request's final error status (0 on success).
 */
static int submit_req(struct qcedev_async_req *qcedev_areq,
					struct qcedev_handle *handle)
{
	struct qcedev_control *podev;
	unsigned long flags = 0;
	int ret = 0;
	struct qcedev_stat *pstat;

	qcedev_areq->err = 0;
	podev = handle->cntl;

	if (podev->platform_support.ce_shared) {
		ret = qcedev_lock_ce(podev);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&podev->lock, flags);

	if (podev->active_command == NULL) {
		podev->active_command = qcedev_areq;
		if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
			ret = start_cipher_req(podev);
		else
			ret = start_sha_req(podev);
	} else {
		list_add_tail(&qcedev_areq->list, &podev->ready_commands);
	}

	if (ret != 0)
		podev->active_command = NULL;

	spin_unlock_irqrestore(&podev->lock, flags);

	/* only wait when the request was started or queued successfully */
	if (ret == 0)
		wait_for_completion(&qcedev_areq->complete);

	if (podev->platform_support.ce_shared)
		ret = qcedev_unlock_ce(podev);

	if (ret)
		qcedev_areq->err = -EIO;

	/* update per-device statistics */
	pstat = &_qcedev_stat[podev->pdev->id];
	if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) {
		switch (qcedev_areq->cipher_op_req.op) {
		case QCEDEV_OPER_DEC:
			if (qcedev_areq->err)
				pstat->qcedev_dec_fail++;
			else
				pstat->qcedev_dec_success++;
			break;
		case QCEDEV_OPER_ENC:
			if (qcedev_areq->err)
				pstat->qcedev_enc_fail++;
			else
				pstat->qcedev_enc_success++;
			break;
		default:
			break;
		};
	} else {
		if (qcedev_areq->err)
			pstat->qcedev_sha_fail++;
		else
			pstat->qcedev_sha_success++;
	}

	return qcedev_areq->err;
}
661
662static int qcedev_sha_init(struct qcedev_async_req *areq,
Mona Hossain087c60b2011-07-20 10:34:57 -0700663 struct qcedev_handle *handle)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700664{
Mona Hossain087c60b2011-07-20 10:34:57 -0700665 struct qcedev_sha_ctxt *sha_ctxt = &handle->sha_ctxt;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700666
667 memset(sha_ctxt, 0, sizeof(struct qcedev_sha_ctxt));
668 sha_ctxt->first_blk = 1;
669
670 if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
671 (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)) {
672 memcpy(&sha_ctxt->digest[0],
673 &_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
674 sha_ctxt->diglen = SHA1_DIGEST_SIZE;
675 } else {
676 if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA256) ||
677 (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)) {
678 memcpy(&sha_ctxt->digest[0],
679 &_std_init_vector_sha256_uint8[0],
680 SHA256_DIGEST_SIZE);
681 sha_ctxt->diglen = SHA256_DIGEST_SIZE;
682 }
683 }
684 return 0;
685}
686
687
/*
 * Perform one SHA update of at most QCE_MAX_OPER_DATA bytes.
 * User data from every data[] entry is copied into a cache-line
 * aligned kernel bounce buffer, prefixed with the residue from the
 * previous update (sha_ctxt.trailing_buf).  Only whole hash blocks
 * are submitted to the engine; the remainder is stored back as the
 * new trailing buffer.  If the combined total fits within one hash
 * block, the data is just accumulated and nothing is submitted.
 * Returns 0 or a -errno.
 */
static int qcedev_sha_update_max_xfer(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	int i = 0;
	struct scatterlist sg_src[2];
	uint32_t total;

	uint8_t *user_src = NULL;
	uint8_t *k_src = NULL;
	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;

	uint32_t sha_pad_len = 0;
	uint32_t trailing_buf_len = 0;
	uint32_t t_buf = handle->sha_ctxt.trailing_buf_len;
	uint32_t sha_block_size;

	total = qcedev_areq->sha_op_req.data_len + t_buf;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1)
		sha_block_size = SHA1_BLOCK_SIZE;
	else
		sha_block_size = SHA256_BLOCK_SIZE;

	if (total <= sha_block_size) {
		/* not a full block yet: append to the trailing buffer only */
		uint32_t len = qcedev_areq->sha_op_req.data_len;

		i = 0;

		k_src = &handle->sha_ctxt.trailing_buf[t_buf];

		/* Copy data from user src(s) */
		while (len > 0) {
			user_src =
			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
			if (user_src && __copy_from_user(k_src,
				(void __user *)user_src,
				qcedev_areq->sha_op_req.data[i].len))
				return -EFAULT;

			len -= qcedev_areq->sha_op_req.data[i].len;
			k_src += qcedev_areq->sha_op_req.data[i].len;
			i++;
		}
		handle->sha_ctxt.trailing_buf_len = total;

		return 0;
	}


	/* extra CACHE_LINE_SIZE * 2 leaves room for alignment below */
	k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
				GFP_KERNEL);
	if (k_buf_src == NULL) {
		pr_err("%s: Can't Allocate memory: k_buf_src 0x%x\n",
			__func__, (uint32_t)k_buf_src);
		return -ENOMEM;
	}

	k_align_src = (uint8_t *) ALIGN(((unsigned int)k_buf_src),
							CACHE_LINE_SIZE);
	k_src = k_align_src;

	/* check for trailing buffer from previous updates and append it */
	if (t_buf > 0) {
		memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
								t_buf);
		k_src += t_buf;
	}

	/* Copy data from user src(s) */
	user_src = (void __user *)qcedev_areq->sha_op_req.data[0].vaddr;
	if (user_src && __copy_from_user(k_src,
				(void __user *)user_src,
				qcedev_areq->sha_op_req.data[0].len)) {
		kfree(k_buf_src);
		return -EFAULT;
	}
	k_src += qcedev_areq->sha_op_req.data[0].len;
	for (i = 1; i < qcedev_areq->sha_op_req.entries; i++) {
		user_src = (void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
		if (user_src && __copy_from_user(k_src,
					(void __user *)user_src,
					qcedev_areq->sha_op_req.data[i].len)) {
			kfree(k_buf_src);
			return -EFAULT;
		}
		k_src += qcedev_areq->sha_op_req.data[i].len;
	}

	/* get new trailing buffer: bytes beyond the last whole block */
	sha_pad_len = ALIGN(total, CE_SHA_BLOCK_SIZE) - total;
	trailing_buf_len = CE_SHA_BLOCK_SIZE - sha_pad_len;

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src[0];
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src,
						total-trailing_buf_len);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = total - trailing_buf_len;

	/* update sha_ctxt trailing buf content to new trailing buf */
	if (trailing_buf_len > 0) {
		memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
		memcpy(&handle->sha_ctxt.trailing_buf[0],
			(k_src - trailing_buf_len),
			trailing_buf_len);
	}
	handle->sha_ctxt.trailing_buf_len = trailing_buf_len;

	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.first_blk = 0;

	kfree(k_buf_src);
	return err;
}
806
/*
 * SHA update entry point.  Validates the user buffers, then splits
 * requests larger than QCE_MAX_OPER_DATA into a sequence of
 * qcedev_sha_update_max_xfer() calls, rewriting the request's data[]
 * entries in place for each slice and restoring the saved original
 * request afterwards.  Returns 0 or a -errno.
 */
static int qcedev_sha_update(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	int i = 0;
	int j = 0;
	int k = 0;
	int num_entries = 0;
	uint32_t total = 0;

	/* verify address src(s) */
	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++)
		if (!access_ok(VERIFY_READ,
			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr,
			qcedev_areq->sha_op_req.data[i].len))
			return -EFAULT;

	if (qcedev_areq->sha_op_req.data_len > QCE_MAX_OPER_DATA) {

		struct qcedev_sha_op_req *saved_req;
		struct qcedev_sha_op_req req;
		struct qcedev_sha_op_req *sreq = &qcedev_areq->sha_op_req;

		/* save the original req structure */
		saved_req =
			kmalloc(sizeof(struct qcedev_sha_op_req), GFP_KERNEL);
		if (saved_req == NULL) {
			pr_err("%s:Can't Allocate mem:saved_req 0x%x\n",
						__func__, (uint32_t)saved_req);
			return -ENOMEM;
		}
		memcpy(&req, sreq, sizeof(struct qcedev_sha_op_req));
		memcpy(saved_req, sreq, sizeof(struct qcedev_sha_op_req));

		i = 0;
		/* Address 32 KB at a time */
		while ((i < req.entries) && (err == 0)) {
			if (sreq->data[i].len > QCE_MAX_OPER_DATA) {
				/* entry i alone exceeds the limit: submit
				 * its first QCE_MAX_OPER_DATA bytes and
				 * shrink the entry in place for next pass */
				sreq->data[0].len = QCE_MAX_OPER_DATA;
				if (i > 0) {
					sreq->data[0].vaddr =
							sreq->data[i].vaddr;
				}

				sreq->data_len = QCE_MAX_OPER_DATA;
				sreq->entries = 1;

				err = qcedev_sha_update_max_xfer(qcedev_areq,
								handle);

				sreq->data[i].len = req.data[i].len -
							QCE_MAX_OPER_DATA;
				sreq->data[i].vaddr = req.data[i].vaddr +
							QCE_MAX_OPER_DATA;
				req.data[i].vaddr = sreq->data[i].vaddr;
				req.data[i].len = sreq->data[i].len;
			} else {
				/* gather whole entries until the limit */
				total = 0;
				for (j = i; j < req.entries; j++) {
					num_entries++;
					if ((total + sreq->data[j].len) >=
							QCE_MAX_OPER_DATA) {
						sreq->data[j].len =
						(QCE_MAX_OPER_DATA - total);
						total = QCE_MAX_OPER_DATA;
						break;
					}
					total += sreq->data[j].len;
				}

				sreq->data_len = total;
				if (i > 0)
					for (k = 0; k < num_entries; k++) {
						sreq->data[k].len =
							sreq->data[i+k].len;
						sreq->data[k].vaddr =
							sreq->data[i+k].vaddr;
					}
				sreq->entries = num_entries;

				i = j;
				err = qcedev_sha_update_max_xfer(qcedev_areq,
								handle);
				num_entries = 0;

				/* advance past the consumed part of the
				 * (possibly partially used) entry i */
				sreq->data[i].vaddr = req.data[i].vaddr +
							sreq->data[i].len;
				sreq->data[i].len = req.data[i].len -
							sreq->data[i].len;
				req.data[i].vaddr = sreq->data[i].vaddr;
				req.data[i].len = sreq->data[i].len;

				if (sreq->data[i].len == 0)
					i++;
			}
		} /* end of while ((i < req.entries) && (err == 0)) */

		/* Restore the original req structure */
		for (i = 0; i < saved_req->entries; i++) {
			sreq->data[i].len = saved_req->data[i].len;
			sreq->data[i].vaddr = saved_req->data[i].vaddr;
		}
		sreq->entries = saved_req->entries;
		sreq->data_len = saved_req->data_len;
		kfree(saved_req);
	} else
		err = qcedev_sha_update_max_xfer(qcedev_areq, handle);

	return err;
}
917
/*
 * Finish a hash: submit whatever residue remains in trailing_buf as
 * the last block (possibly zero-length), then reset the per-handle
 * hash state for reuse.  The final digest is left in sha_ctxt.digest
 * by the completion callback.  Returns 0 or a -errno.
 */
static int qcedev_sha_final(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	struct scatterlist sg_src;
	uint32_t total;

	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;

	handle->sha_ctxt.first_blk = 0;
	handle->sha_ctxt.last_blk = 1;

	total = handle->sha_ctxt.trailing_buf_len;

	if (total) {
		/* bounce the residue into an aligned kernel buffer */
		k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
					GFP_KERNEL);
		if (k_buf_src == NULL) {
			pr_err("%s: Can't Allocate memory: k_buf_src 0x%x\n",
				__func__, (uint32_t)k_buf_src);
			return -ENOMEM;
		}

		k_align_src = (uint8_t *) ALIGN(((unsigned int)k_buf_src),
							CACHE_LINE_SIZE);
		memcpy(k_align_src, &handle->sha_ctxt.trailing_buf[0], total);
	}
	handle->sha_ctxt.last_blk = 1;
	handle->sha_ctxt.first_blk = 0;

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src, total);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = total;

	err = submit_req(qcedev_areq, handle);

	/* hash complete: clear the running state */
	handle->sha_ctxt.first_blk = 0;
	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.auth_data[0] = 0;
	handle->sha_ctxt.auth_data[1] = 0;
	handle->sha_ctxt.trailing_buf_len = 0;
	memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);

	/* kfree(NULL) is a no-op when total was 0 */
	kfree(k_buf_src);
	return err;
}
967
968static int qcedev_hash_cmac(struct qcedev_async_req *qcedev_areq,
Mona Hossain087c60b2011-07-20 10:34:57 -0700969 struct qcedev_handle *handle)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700970{
971 int err = 0;
972 int i = 0;
973 struct scatterlist sg_src[2];
974 uint32_t total;
975
976 uint8_t *user_src = NULL;
977 uint8_t *k_src = NULL;
978 uint8_t *k_buf_src = NULL;
979
980 total = qcedev_areq->sha_op_req.data_len;
981
982 /* verify address src(s) */
983 for (i = 0; i < qcedev_areq->sha_op_req.entries; i++)
984 if (!access_ok(VERIFY_READ,
985 (void __user *)qcedev_areq->sha_op_req.data[i].vaddr,
986 qcedev_areq->sha_op_req.data[i].len))
987 return -EFAULT;
988
989 /* Verify Source Address */
990 if (!access_ok(VERIFY_READ,
991 (void __user *)qcedev_areq->sha_op_req.authkey,
992 qcedev_areq->sha_op_req.authklen))
993 return -EFAULT;
Mona Hossain087c60b2011-07-20 10:34:57 -0700994 if (__copy_from_user(&handle->sha_ctxt.authkey[0],
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700995 (void __user *)qcedev_areq->sha_op_req.authkey,
996 qcedev_areq->sha_op_req.authklen))
997 return -EFAULT;
998
999
1000 k_buf_src = kmalloc(total, GFP_KERNEL);
Ramesh Masavarapuc1d2b682011-09-07 14:57:58 -07001001 if (k_buf_src == NULL) {
1002 pr_err("%s: Can't Allocate memory: k_buf_src 0x%x\n",
1003 __func__, (uint32_t)k_buf_src);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001004 return -ENOMEM;
Ramesh Masavarapuc1d2b682011-09-07 14:57:58 -07001005 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001006
1007 k_src = k_buf_src;
1008
1009 /* Copy data from user src(s) */
1010 user_src = (void __user *)qcedev_areq->sha_op_req.data[0].vaddr;
1011 for (i = 0; i < qcedev_areq->sha_op_req.entries; i++) {
1012 user_src =
1013 (void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
1014 if (user_src && __copy_from_user(k_src, (void __user *)user_src,
1015 qcedev_areq->sha_op_req.data[i].len)) {
1016 kfree(k_buf_src);
1017 return -EFAULT;
1018 }
1019 k_src += qcedev_areq->sha_op_req.data[i].len;
1020 }
1021
1022 qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src[0];
1023 sg_set_buf(qcedev_areq->sha_req.sreq.src, k_buf_src, total);
1024 sg_mark_end(qcedev_areq->sha_req.sreq.src);
1025
1026 qcedev_areq->sha_req.sreq.nbytes = total;
Mona Hossain087c60b2011-07-20 10:34:57 -07001027 handle->sha_ctxt.diglen = qcedev_areq->sha_op_req.diglen;
1028 err = submit_req(qcedev_areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001029
1030 kfree(k_buf_src);
1031 return err;
1032}
1033
/*
 * qcedev_set_hmac_auth_key() - load the HMAC key into the handle's context.
 *
 * Per RFC 2104, a key longer than QCEDEV_MAX_KEY_SIZE cannot be used
 * directly: it is first hashed (with the matching plain SHA algorithm) and
 * the digest becomes the effective key. Short keys are copied verbatim.
 *
 * Returns 0 on success, -EFAULT on bad user pointers, or an error from the
 * internal SHA update/final pass.
 */
static int qcedev_set_hmac_auth_key(struct qcedev_async_req *areq,
					struct qcedev_handle *handle)
{
	int err = 0;

	if (areq->sha_op_req.authklen <= QCEDEV_MAX_KEY_SIZE) {
		/* Verify Source Address */
		if (!access_ok(VERIFY_READ,
				(void __user *)areq->sha_op_req.authkey,
				areq->sha_op_req.authklen))
			return -EFAULT;
		if (__copy_from_user(&handle->sha_ctxt.authkey[0],
				(void __user *)areq->sha_op_req.authkey,
				areq->sha_op_req.authklen))
			return -EFAULT;
	} else {
		/* Key too long: digest it with plain SHA on a private
		 * request so the caller's request is left untouched.
		 */
		struct qcedev_async_req authkey_areq;

		init_completion(&authkey_areq.complete);

		authkey_areq.sha_op_req.entries = 1;
		/* The key bytes still live in userspace; qcedev_sha_update
		 * copies them in via the data[] descriptors.
		 */
		authkey_areq.sha_op_req.data[0].vaddr =
						areq->sha_op_req.authkey;
		authkey_areq.sha_op_req.data[0].len = areq->sha_op_req.authklen;
		authkey_areq.sha_op_req.data_len = areq->sha_op_req.authklen;
		authkey_areq.sha_op_req.diglen = 0;
		memset(&authkey_areq.sha_op_req.digest[0], 0,
						QCEDEV_MAX_SHA_DIGEST);
		/* Map HMAC alg to its underlying plain hash. */
		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA1;
		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)
			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA256;

		authkey_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;

		qcedev_sha_init(&authkey_areq, handle);
		err = qcedev_sha_update(&authkey_areq, handle);
		if (!err)
			err = qcedev_sha_final(&authkey_areq, handle);
		else
			return err;
		/* The digest left in the handle's context is the new key. */
		memcpy(&handle->sha_ctxt.authkey[0],
				&handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
	}
	return err;
}
1081
1082static int qcedev_hmac_get_ohash(struct qcedev_async_req *qcedev_areq,
Mona Hossain087c60b2011-07-20 10:34:57 -07001083 struct qcedev_handle *handle)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001084{
1085 int err = 0;
1086 struct scatterlist sg_src;
1087 uint8_t *k_src = NULL;
1088 uint32_t sha_block_size = 0;
1089 uint32_t sha_digest_size = 0;
1090
1091 if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
1092 sha_digest_size = SHA1_DIGEST_SIZE;
1093 sha_block_size = SHA1_BLOCK_SIZE;
1094 } else {
1095 if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
1096 sha_digest_size = SHA256_DIGEST_SIZE;
1097 sha_block_size = SHA256_BLOCK_SIZE;
1098 }
1099 }
1100 k_src = kmalloc(sha_block_size, GFP_KERNEL);
Ramesh Masavarapuc1d2b682011-09-07 14:57:58 -07001101 if (k_src == NULL) {
1102 pr_err("%s: Can't Allocate memory: k_src 0x%x\n",
1103 __func__, (uint32_t)k_src);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001104 return -ENOMEM;
Ramesh Masavarapuc1d2b682011-09-07 14:57:58 -07001105 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001106
1107 /* check for trailing buffer from previous updates and append it */
Mona Hossain087c60b2011-07-20 10:34:57 -07001108 memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
1109 handle->sha_ctxt.trailing_buf_len);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001110
1111 qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
1112 sg_set_buf(qcedev_areq->sha_req.sreq.src, k_src, sha_block_size);
1113 sg_mark_end(qcedev_areq->sha_req.sreq.src);
1114
1115 qcedev_areq->sha_req.sreq.nbytes = sha_block_size;
Mona Hossain087c60b2011-07-20 10:34:57 -07001116 memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
1117 memcpy(&handle->sha_ctxt.trailing_buf[0], &handle->sha_ctxt.digest[0],
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001118 sha_digest_size);
Mona Hossain087c60b2011-07-20 10:34:57 -07001119 handle->sha_ctxt.trailing_buf_len = sha_digest_size;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001120
Mona Hossain087c60b2011-07-20 10:34:57 -07001121 handle->sha_ctxt.first_blk = 1;
1122 handle->sha_ctxt.last_blk = 0;
1123 handle->sha_ctxt.auth_data[0] = 0;
1124 handle->sha_ctxt.auth_data[1] = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001125
1126 if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
Mona Hossain087c60b2011-07-20 10:34:57 -07001127 memcpy(&handle->sha_ctxt.digest[0],
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001128 &_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
Mona Hossain087c60b2011-07-20 10:34:57 -07001129 handle->sha_ctxt.diglen = SHA1_DIGEST_SIZE;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001130 }
1131
1132 if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
Mona Hossain087c60b2011-07-20 10:34:57 -07001133 memcpy(&handle->sha_ctxt.digest[0],
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001134 &_std_init_vector_sha256_uint8[0], SHA256_DIGEST_SIZE);
Mona Hossain087c60b2011-07-20 10:34:57 -07001135 handle->sha_ctxt.diglen = SHA256_DIGEST_SIZE;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001136 }
Mona Hossain087c60b2011-07-20 10:34:57 -07001137 err = submit_req(qcedev_areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001138
Mona Hossain087c60b2011-07-20 10:34:57 -07001139 handle->sha_ctxt.last_blk = 0;
1140 handle->sha_ctxt.first_blk = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001141
1142 kfree(k_src);
1143 return err;
1144}
1145
1146static int qcedev_hmac_update_iokey(struct qcedev_async_req *areq,
Mona Hossain087c60b2011-07-20 10:34:57 -07001147 struct qcedev_handle *handle, bool ikey)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001148{
1149 int i;
1150 uint32_t constant;
1151 uint32_t sha_block_size;
1152
1153 if (ikey)
1154 constant = 0x36;
1155 else
1156 constant = 0x5c;
1157
1158 if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
1159 sha_block_size = SHA1_BLOCK_SIZE;
1160 else
1161 sha_block_size = SHA256_BLOCK_SIZE;
1162
Mona Hossain087c60b2011-07-20 10:34:57 -07001163 memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001164 for (i = 0; i < sha_block_size; i++)
Mona Hossain087c60b2011-07-20 10:34:57 -07001165 handle->sha_ctxt.trailing_buf[i] =
1166 (handle->sha_ctxt.authkey[i] ^ constant);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001167
Mona Hossain087c60b2011-07-20 10:34:57 -07001168 handle->sha_ctxt.trailing_buf_len = sha_block_size;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001169 return 0;
1170}
1171
1172static int qcedev_hmac_init(struct qcedev_async_req *areq,
Mona Hossain087c60b2011-07-20 10:34:57 -07001173 struct qcedev_handle *handle)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001174{
1175 int err;
Mona Hossain087c60b2011-07-20 10:34:57 -07001176 struct qcedev_control *podev = handle->cntl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001177
Mona Hossain087c60b2011-07-20 10:34:57 -07001178 qcedev_sha_init(areq, handle);
1179 err = qcedev_set_hmac_auth_key(areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001180 if (err)
1181 return err;
1182 if (!podev->ce_support.sha_hmac)
Mona Hossain087c60b2011-07-20 10:34:57 -07001183 qcedev_hmac_update_iokey(areq, handle, true);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001184 return 0;
1185}
1186
1187static int qcedev_hmac_final(struct qcedev_async_req *areq,
Mona Hossain087c60b2011-07-20 10:34:57 -07001188 struct qcedev_handle *handle)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001189{
1190 int err;
Mona Hossain087c60b2011-07-20 10:34:57 -07001191 struct qcedev_control *podev = handle->cntl;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001192
Mona Hossain087c60b2011-07-20 10:34:57 -07001193 err = qcedev_sha_final(areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001194 if (podev->ce_support.sha_hmac)
1195 return err;
1196
Mona Hossain087c60b2011-07-20 10:34:57 -07001197 qcedev_hmac_update_iokey(areq, handle, false);
1198 err = qcedev_hmac_get_ohash(areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001199 if (err)
1200 return err;
Mona Hossain087c60b2011-07-20 10:34:57 -07001201 err = qcedev_sha_final(areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001202
1203 return err;
1204}
1205
1206static int qcedev_hash_init(struct qcedev_async_req *areq,
Mona Hossain087c60b2011-07-20 10:34:57 -07001207 struct qcedev_handle *handle)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001208{
1209 if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
1210 (areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
Mona Hossain087c60b2011-07-20 10:34:57 -07001211 return qcedev_sha_init(areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001212 else
Mona Hossain087c60b2011-07-20 10:34:57 -07001213 return qcedev_hmac_init(areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001214}
1215
/*
 * qcedev_hash_update() - update path is shared by plain SHA and HMAC
 * (the key material was already folded in at init time), so this is a
 * straight pass-through to qcedev_sha_update().
 */
static int qcedev_hash_update(struct qcedev_async_req *qcedev_areq,
					struct qcedev_handle *handle)
{
	return qcedev_sha_update(qcedev_areq, handle);
}
1221
1222static int qcedev_hash_final(struct qcedev_async_req *areq,
Mona Hossain087c60b2011-07-20 10:34:57 -07001223 struct qcedev_handle *handle)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001224{
1225 if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
1226 (areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
Mona Hossain087c60b2011-07-20 10:34:57 -07001227 return qcedev_sha_final(areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001228 else
Mona Hossain087c60b2011-07-20 10:34:57 -07001229 return qcedev_hmac_final(areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001230}
1231
1232static int qcedev_pmem_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
Mona Hossain087c60b2011-07-20 10:34:57 -07001233 struct qcedev_handle *handle)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001234{
1235 int i = 0;
1236 int err = 0;
1237 struct scatterlist *sg_src = NULL;
1238 struct scatterlist *sg_dst = NULL;
1239 struct scatterlist *sg_ndex = NULL;
1240 struct file *file_src = NULL;
1241 struct file *file_dst = NULL;
1242 unsigned long paddr;
1243 unsigned long kvaddr;
1244 unsigned long len;
1245
1246 sg_src = kmalloc((sizeof(struct scatterlist) *
1247 areq->cipher_op_req.entries), GFP_KERNEL);
1248 if (sg_src == NULL) {
Ramesh Masavarapuc1d2b682011-09-07 14:57:58 -07001249 pr_err("%s: Can't Allocate memory:sg_src 0x%x\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001250 __func__, (uint32_t)sg_src);
1251 return -ENOMEM;
1252
1253 }
1254 memset(sg_src, 0, (sizeof(struct scatterlist) *
1255 areq->cipher_op_req.entries));
1256 sg_ndex = sg_src;
1257 areq->cipher_req.creq.src = sg_src;
1258
1259 /* address src */
1260 get_pmem_file(areq->cipher_op_req.pmem.fd_src, &paddr,
1261 &kvaddr, &len, &file_src);
1262
1263 for (i = 0; i < areq->cipher_op_req.entries; i++) {
1264 sg_set_buf(sg_ndex,
1265 ((uint8_t *)(areq->cipher_op_req.pmem.src[i].offset) + kvaddr),
1266 areq->cipher_op_req.pmem.src[i].len);
1267 sg_ndex++;
1268 }
1269 sg_mark_end(--sg_ndex);
1270
1271 for (i = 0; i < areq->cipher_op_req.entries; i++)
1272 areq->cipher_op_req.pmem.src[i].offset += (uint32_t)paddr;
1273
1274 /* address dst */
1275 /* If not place encryption/decryption */
1276 if (areq->cipher_op_req.in_place_op != 1) {
1277 sg_dst = kmalloc((sizeof(struct scatterlist) *
1278 areq->cipher_op_req.entries), GFP_KERNEL);
Ramesh Masavarapuc1d2b682011-09-07 14:57:58 -07001279 if (sg_dst == NULL) {
1280 pr_err("%s: Can't Allocate memory: sg_dst 0x%x\n",
1281 __func__, (uint32_t)sg_dst);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001282 return -ENOMEM;
Ramesh Masavarapuc1d2b682011-09-07 14:57:58 -07001283 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001284 memset(sg_dst, 0, (sizeof(struct scatterlist) *
1285 areq->cipher_op_req.entries));
1286 areq->cipher_req.creq.dst = sg_dst;
1287 sg_ndex = sg_dst;
1288
1289 get_pmem_file(areq->cipher_op_req.pmem.fd_dst, &paddr,
1290 &kvaddr, &len, &file_dst);
1291 for (i = 0; i < areq->cipher_op_req.entries; i++)
1292 sg_set_buf(sg_ndex++,
1293 ((uint8_t *)(areq->cipher_op_req.pmem.dst[i].offset)
1294 + kvaddr), areq->cipher_op_req.pmem.dst[i].len);
1295 sg_mark_end(--sg_ndex);
1296
1297 for (i = 0; i < areq->cipher_op_req.entries; i++)
1298 areq->cipher_op_req.pmem.dst[i].offset +=
1299 (uint32_t)paddr;
1300 } else {
1301 areq->cipher_req.creq.dst = sg_src;
1302 for (i = 0; i < areq->cipher_op_req.entries; i++) {
1303 areq->cipher_op_req.pmem.dst[i].offset =
1304 areq->cipher_op_req.pmem.src[i].offset;
1305 areq->cipher_op_req.pmem.dst[i].len =
1306 areq->cipher_op_req.pmem.src[i].len;
1307 }
1308 }
1309
1310 areq->cipher_req.creq.nbytes = areq->cipher_op_req.data_len;
1311 areq->cipher_req.creq.info = areq->cipher_op_req.iv;
1312
Mona Hossain087c60b2011-07-20 10:34:57 -07001313 err = submit_req(areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001314
1315 kfree(sg_src);
1316 kfree(sg_dst);
1317
1318 if (file_dst)
1319 put_pmem_file(file_dst);
1320 if (file_src)
1321 put_pmem_file(file_src);
1322
1323 return err;
1324};
1325
1326
1327static int qcedev_pmem_ablk_cipher(struct qcedev_async_req *qcedev_areq,
Mona Hossain087c60b2011-07-20 10:34:57 -07001328 struct qcedev_handle *handle)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001329{
1330 int err = 0;
1331 int i = 0;
1332 int j = 0;
1333 int k = 0;
1334 int num_entries = 0;
1335 uint32_t total = 0;
1336 struct qcedev_cipher_op_req *saved_req;
1337 struct qcedev_cipher_op_req *creq = &qcedev_areq->cipher_op_req;
1338
1339 saved_req = kmalloc(sizeof(struct qcedev_cipher_op_req), GFP_KERNEL);
1340 if (saved_req == NULL) {
Ramesh Masavarapuc1d2b682011-09-07 14:57:58 -07001341 pr_err(KERN_ERR "%s:Can't Allocate mem:saved_req 0x%x\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001342 __func__, (uint32_t)saved_req);
1343 return -ENOMEM;
1344 }
1345 memcpy(saved_req, creq, sizeof(struct qcedev_cipher_op_req));
1346
1347 if (qcedev_areq->cipher_op_req.data_len > QCE_MAX_OPER_DATA) {
1348
1349 struct qcedev_cipher_op_req req;
1350
1351 /* save the original req structure */
1352 memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));
1353
1354 i = 0;
1355 /* Address 32 KB at a time */
1356 while ((i < req.entries) && (err == 0)) {
1357 if (creq->pmem.src[i].len > QCE_MAX_OPER_DATA) {
1358 creq->pmem.src[0].len = QCE_MAX_OPER_DATA;
1359 if (i > 0) {
1360 creq->pmem.src[0].offset =
1361 creq->pmem.src[i].offset;
1362 }
1363
1364 creq->data_len = QCE_MAX_OPER_DATA;
1365 creq->entries = 1;
1366
1367 err =
1368 qcedev_pmem_ablk_cipher_max_xfer(qcedev_areq,
Mona Hossain087c60b2011-07-20 10:34:57 -07001369 handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001370
1371 creq->pmem.src[i].len = req.pmem.src[i].len -
1372 QCE_MAX_OPER_DATA;
1373 creq->pmem.src[i].offset =
1374 req.pmem.src[i].offset +
1375 QCE_MAX_OPER_DATA;
1376 req.pmem.src[i].offset =
1377 creq->pmem.src[i].offset;
1378 req.pmem.src[i].len = creq->pmem.src[i].len;
1379 } else {
1380 total = 0;
1381 for (j = i; j < req.entries; j++) {
1382 num_entries++;
1383 if ((total + creq->pmem.src[j].len)
1384 >= QCE_MAX_OPER_DATA) {
1385 creq->pmem.src[j].len =
1386 QCE_MAX_OPER_DATA - total;
1387 total = QCE_MAX_OPER_DATA;
1388 break;
1389 }
1390 total += creq->pmem.src[j].len;
1391 }
1392
1393 creq->data_len = total;
1394 if (i > 0)
1395 for (k = 0; k < num_entries; k++) {
1396 creq->pmem.src[k].len =
1397 creq->pmem.src[i+k].len;
1398 creq->pmem.src[k].offset =
1399 creq->pmem.src[i+k].offset;
1400 }
1401 creq->entries = num_entries;
1402
1403 i = j;
1404 err =
1405 qcedev_pmem_ablk_cipher_max_xfer(qcedev_areq,
Mona Hossain087c60b2011-07-20 10:34:57 -07001406 handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001407 num_entries = 0;
1408
1409 creq->pmem.src[i].offset =
1410 req.pmem.src[i].offset +
1411 creq->pmem.src[i].len;
1412 creq->pmem.src[i].len =
1413 req.pmem.src[i].len -
1414 creq->pmem.src[i].len;
1415 req.pmem.src[i].offset =
1416 creq->pmem.src[i].offset;
1417 req.pmem.src[i].len =
1418 creq->pmem.src[i].len;
1419
1420 if (creq->pmem.src[i].len == 0)
1421 i++;
1422 }
1423
1424 } /* end of while ((i < req.entries) && (err == 0)) */
1425
1426 } else
Mona Hossain087c60b2011-07-20 10:34:57 -07001427 err = qcedev_pmem_ablk_cipher_max_xfer(qcedev_areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001428
1429 /* Restore the original req structure */
1430 for (i = 0; i < saved_req->entries; i++) {
1431 creq->pmem.src[i].len = saved_req->pmem.src[i].len;
1432 creq->pmem.src[i].offset = saved_req->pmem.src[i].offset;
1433 }
1434 creq->entries = saved_req->entries;
1435 creq->data_len = saved_req->data_len;
1436 kfree(saved_req);
1437
1438 return err;
1439
1440}
1441
/*
 * qcedev_vbuf_ablk_cipher_max_xfer() - one virtual-buffer cipher transfer:
 * gather user source segments into the caller-provided aligned kernel
 * buffer, run the operation in place, then scatter the result back to the
 * user destination segments.
 *
 * @di: in/out index of the next unfilled destination entry, so successive
 *      transfers continue scattering where the previous one stopped.
 * @k_align_src: cache-line-aligned staging buffer owned by the caller
 *      (sized for QCE_MAX_OPER_DATA plus byteoffset).
 *
 * Returns 0 on success, -EFAULT on a failed user copy, or the
 * submit_req() result.
 */
static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
				int *di, struct qcedev_handle *handle,
				uint8_t *k_align_src)
{
	int err = 0;
	int i = 0;
	int dst_i = *di;
	struct scatterlist sg_src;
	uint32_t byteoffset = 0;
	uint8_t *user_src = NULL;
	uint8_t *k_align_dst = k_align_src;
	struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;


	/* CTR mode may start mid-block; data is staged after the offset. */
	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
		byteoffset = areq->cipher_op_req.byteoffset;

	user_src = (void __user *)areq->cipher_op_req.vbuf.src[0].vaddr;
	if (user_src && __copy_from_user((k_align_src + byteoffset),
				(void __user *)user_src,
				areq->cipher_op_req.vbuf.src[0].len))
		return -EFAULT;

	k_align_src += areq->cipher_op_req.vbuf.src[0].len;

	/* Remaining source segments are packed contiguously (no offset). */
	for (i = 1; i < areq->cipher_op_req.entries; i++) {
		user_src =
			(void __user *)areq->cipher_op_req.vbuf.src[i].vaddr;
		if (user_src && __copy_from_user(k_align_src,
					(void __user *)user_src,
					areq->cipher_op_req.vbuf.src[i].len)) {
			return -EFAULT;
		}
		k_align_src += areq->cipher_op_req.vbuf.src[i].len;
	}

	/* restore src beginning */
	k_align_src = k_align_dst;
	/* data_len temporarily includes the offset for the engine pass. */
	areq->cipher_op_req.data_len += byteoffset;

	areq->cipher_req.creq.src = (struct scatterlist *) &sg_src;
	areq->cipher_req.creq.dst = (struct scatterlist *) &sg_src;

	/* In place encryption/decryption */
	sg_set_buf(areq->cipher_req.creq.src,
					k_align_dst,
					areq->cipher_op_req.data_len);
	sg_mark_end(areq->cipher_req.creq.src);

	areq->cipher_req.creq.nbytes = areq->cipher_op_req.data_len;
	areq->cipher_req.creq.info = areq->cipher_op_req.iv;
	areq->cipher_op_req.entries = 1;

	err = submit_req(areq, handle);

	/* copy data to destination buffer*/
	creq->data_len -= byteoffset;

	/* Scatter the ciphertext across destination entries, resuming at
	 * dst_i; a partially-filled entry has its vaddr/len advanced so the
	 * next transfer continues exactly where this one ended.
	 */
	while (creq->data_len > 0) {
		if (creq->vbuf.dst[dst_i].len <= creq->data_len) {
			if (err == 0 && __copy_to_user(
				(void __user *)creq->vbuf.dst[dst_i].vaddr,
					(k_align_dst + byteoffset),
					creq->vbuf.dst[dst_i].len))
				return -EFAULT;

			k_align_dst += creq->vbuf.dst[dst_i].len +
						byteoffset;
			creq->data_len -= creq->vbuf.dst[dst_i].len;
			dst_i++;
		} else {
			if (err == 0 && __copy_to_user(
				(void __user *)creq->vbuf.dst[dst_i].vaddr,
					(k_align_dst + byteoffset),
					creq->data_len))
				return -EFAULT;

			k_align_dst += creq->data_len;
			creq->vbuf.dst[dst_i].len -= creq->data_len;
			creq->vbuf.dst[dst_i].vaddr += creq->data_len;
			creq->data_len = 0;
		}
	}
	*di = dst_i;

	return err;
};
1529
1530static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq,
Mona Hossain087c60b2011-07-20 10:34:57 -07001531 struct qcedev_handle *handle)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001532{
1533 int err = 0;
1534 int di = 0;
1535 int i = 0;
1536 int j = 0;
1537 int k = 0;
1538 uint32_t byteoffset = 0;
1539 int num_entries = 0;
1540 uint32_t total = 0;
1541 uint32_t len;
1542 uint8_t *k_buf_src = NULL;
1543 uint8_t *k_align_src = NULL;
1544 uint32_t max_data_xfer;
1545 struct qcedev_cipher_op_req *saved_req;
1546 struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;
1547
1548 /* Verify Source Address's */
1549 for (i = 0; i < areq->cipher_op_req.entries; i++)
1550 if (!access_ok(VERIFY_READ,
1551 (void __user *)areq->cipher_op_req.vbuf.src[i].vaddr,
1552 areq->cipher_op_req.vbuf.src[i].len))
1553 return -EFAULT;
1554
1555 /* Verify Destination Address's */
1556 if (areq->cipher_op_req.in_place_op != 1)
1557 for (i = 0; i < areq->cipher_op_req.entries; i++)
1558 if (!access_ok(VERIFY_READ,
1559 (void __user *)areq->cipher_op_req.vbuf.dst[i].vaddr,
1560 areq->cipher_op_req.vbuf.dst[i].len))
1561 return -EFAULT;
1562
1563 if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
1564 byteoffset = areq->cipher_op_req.byteoffset;
1565 k_buf_src = kmalloc(QCE_MAX_OPER_DATA + CACHE_LINE_SIZE * 2,
1566 GFP_KERNEL);
1567 if (k_buf_src == NULL) {
Ramesh Masavarapuc1d2b682011-09-07 14:57:58 -07001568 pr_err("%s: Can't Allocate memory: k_buf_src 0x%x\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001569 __func__, (uint32_t)k_buf_src);
1570 return -ENOMEM;
1571 }
1572 k_align_src = (uint8_t *) ALIGN(((unsigned int)k_buf_src),
1573 CACHE_LINE_SIZE);
1574 max_data_xfer = QCE_MAX_OPER_DATA - byteoffset;
1575
1576 saved_req = kmalloc(sizeof(struct qcedev_cipher_op_req), GFP_KERNEL);
1577 if (saved_req == NULL) {
Ramesh Masavarapuc1d2b682011-09-07 14:57:58 -07001578 pr_err("%s: Can't Allocate memory:saved_req 0x%x\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001579 __func__, (uint32_t)saved_req);
1580 kfree(k_buf_src);
1581 return -ENOMEM;
1582
1583 }
1584 memcpy(saved_req, creq, sizeof(struct qcedev_cipher_op_req));
1585
1586 if (areq->cipher_op_req.data_len > max_data_xfer) {
1587 struct qcedev_cipher_op_req req;
1588
1589 /* save the original req structure */
1590 memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));
1591
1592 i = 0;
1593 /* Address 32 KB at a time */
1594 while ((i < req.entries) && (err == 0)) {
1595 if (creq->vbuf.src[i].len > max_data_xfer) {
1596 creq->vbuf.src[0].len = max_data_xfer;
1597 if (i > 0) {
1598 creq->vbuf.src[0].vaddr =
1599 creq->vbuf.src[i].vaddr;
1600 }
1601
1602 creq->data_len = max_data_xfer;
1603 creq->entries = 1;
1604
1605 err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
Mona Hossain087c60b2011-07-20 10:34:57 -07001606 &di, handle, k_align_src);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001607 if (err < 0) {
1608 kfree(k_buf_src);
1609 kfree(saved_req);
1610 return err;
1611 }
1612
1613 creq->vbuf.src[i].len = req.vbuf.src[i].len -
1614 max_data_xfer;
1615 creq->vbuf.src[i].vaddr =
1616 req.vbuf.src[i].vaddr +
1617 max_data_xfer;
1618 req.vbuf.src[i].vaddr =
1619 creq->vbuf.src[i].vaddr;
1620 req.vbuf.src[i].len = creq->vbuf.src[i].len;
1621
1622 } else {
1623 total = areq->cipher_op_req.byteoffset;
1624 for (j = i; j < req.entries; j++) {
1625 num_entries++;
1626 if ((total + creq->vbuf.src[j].len)
1627 >= max_data_xfer) {
1628 creq->vbuf.src[j].len =
1629 max_data_xfer - total;
1630 total = max_data_xfer;
1631 break;
1632 }
1633 total += creq->vbuf.src[j].len;
1634 }
1635
1636 creq->data_len = total;
1637 if (i > 0)
1638 for (k = 0; k < num_entries; k++) {
1639 creq->vbuf.src[k].len =
1640 creq->vbuf.src[i+k].len;
1641 creq->vbuf.src[k].vaddr =
1642 creq->vbuf.src[i+k].vaddr;
1643 }
1644 creq->entries = num_entries;
1645
1646 i = j;
1647 err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
Mona Hossain087c60b2011-07-20 10:34:57 -07001648 &di, handle, k_align_src);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001649 if (err < 0) {
1650 kfree(k_buf_src);
1651 kfree(saved_req);
1652 return err;
1653 }
1654
1655 num_entries = 0;
1656 areq->cipher_op_req.byteoffset = 0;
1657
1658 creq->vbuf.src[i].vaddr = req.vbuf.src[i].vaddr
1659 + creq->vbuf.src[i].len;
1660 creq->vbuf.src[i].len = req.vbuf.src[i].len -
1661 creq->vbuf.src[i].len;
1662
1663 req.vbuf.src[i].vaddr =
1664 creq->vbuf.src[i].vaddr;
1665 req.vbuf.src[i].len = creq->vbuf.src[i].len;
1666
1667 if (creq->vbuf.src[i].len == 0)
1668 i++;
1669 }
1670
1671 areq->cipher_op_req.byteoffset = 0;
1672 max_data_xfer = QCE_MAX_OPER_DATA;
1673 byteoffset = 0;
1674
1675 } /* end of while ((i < req.entries) && (err == 0)) */
1676 } else
Mona Hossain087c60b2011-07-20 10:34:57 -07001677 err = qcedev_vbuf_ablk_cipher_max_xfer(areq, &di, handle,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001678 k_align_src);
1679
1680 /* Restore the original req structure */
1681 for (i = 0; i < saved_req->entries; i++) {
1682 creq->vbuf.src[i].len = saved_req->vbuf.src[i].len;
1683 creq->vbuf.src[i].vaddr = saved_req->vbuf.src[i].vaddr;
1684 }
1685 for (len = 0, i = 0; len < saved_req->data_len; i++) {
1686 creq->vbuf.dst[i].len = saved_req->vbuf.dst[i].len;
1687 creq->vbuf.dst[i].vaddr = saved_req->vbuf.dst[i].vaddr;
1688 len += saved_req->vbuf.dst[i].len;
1689 }
1690 creq->entries = saved_req->entries;
1691 creq->data_len = saved_req->data_len;
1692 creq->byteoffset = saved_req->byteoffset;
1693
1694 kfree(saved_req);
1695 kfree(k_buf_src);
1696 return err;
1697
1698}
1699
1700static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
1701 struct qcedev_control *podev)
1702{
1703 if ((req->entries == 0) || (req->data_len == 0))
1704 goto error;
1705 if ((req->alg >= QCEDEV_ALG_LAST) ||
1706 (req->mode >= QCEDEV_AES_DES_MODE_LAST))
1707 goto error;
1708 if (req->alg == QCEDEV_ALG_AES) {
1709 if ((req->mode == QCEDEV_AES_MODE_XTS) &&
1710 (!podev->ce_support.aes_xts))
1711 goto error;
1712 /* if intending to use HW key make sure key fields are set
1713 * correctly and HW key is indeed supported in target
1714 */
1715 if (req->encklen == 0) {
1716 int i;
1717 for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++)
1718 if (req->enckey[i])
1719 goto error;
1720 if ((req->op != QCEDEV_OPER_ENC_NO_KEY) &&
1721 (req->op != QCEDEV_OPER_DEC_NO_KEY))
1722 if (!podev->platform_support.hw_key_support)
1723 goto error;
1724 } else {
1725 if (req->encklen == QCEDEV_AES_KEY_192) {
1726 if (!podev->ce_support.aes_key_192)
1727 goto error;
1728 } else {
1729 /* if not using HW key make sure key
1730 * length is valid
1731 */
1732 if (!((req->encklen == QCEDEV_AES_KEY_128) ||
1733 (req->encklen == QCEDEV_AES_KEY_256)))
1734 goto error;
1735 }
1736 }
1737 }
1738 /* if using a byteoffset, make sure it is CTR mode using vbuf */
1739 if (req->byteoffset) {
1740 if (req->mode != QCEDEV_AES_MODE_CTR)
1741 goto error;
1742 else { /* if using CTR mode make sure not using Pmem */
1743 if (req->use_pmem)
1744 goto error;
1745 }
1746 }
1747 /* if using PMEM with non-zero byteoffset, ensure it is in_place_op */
1748 if (req->use_pmem) {
1749 if (!req->in_place_op)
1750 goto error;
1751 }
1752 /* Ensure zer ivlen for ECB mode */
1753 if (req->ivlen != 0) {
1754 if ((req->mode == QCEDEV_AES_MODE_ECB) ||
1755 (req->mode == QCEDEV_DES_MODE_ECB))
1756 goto error;
1757 } else {
1758 if ((req->mode != QCEDEV_AES_MODE_ECB) &&
1759 (req->mode != QCEDEV_DES_MODE_ECB))
1760 goto error;
1761 }
1762
1763 return 0;
1764error:
1765 return -EINVAL;
1766
1767}
1768
1769static int qcedev_check_sha_params(struct qcedev_sha_op_req *req,
1770 struct qcedev_control *podev)
1771{
1772 if ((req->alg == QCEDEV_ALG_AES_CMAC) &&
1773 (!podev->ce_support.cmac))
1774 goto sha_error;
1775
1776 if ((req->entries == 0) || (req->data_len == 0))
1777 goto sha_error;
1778
1779 if (req->alg >= QCEDEV_ALG_SHA_ALG_LAST)
1780 goto sha_error;
1781
1782 return 0;
1783sha_error:
1784 return -EINVAL;
1785}
1786
/*
 * Main ioctl entry point for the qcedev misc device.
 *
 * Copies the per-operation request structure in from user space,
 * validates it, performs the cipher/hash operation synchronously, and
 * copies the (possibly updated) request back out.  Returns 0 on
 * success or a negative errno.
 */
static long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	int err = 0;
	struct qcedev_handle *handle;
	struct qcedev_control *podev;
	struct qcedev_async_req qcedev_areq;
	struct qcedev_stat *pstat;

	handle = file->private_data;
	podev = handle->cntl;
	qcedev_areq.handle = handle;
	/* Reject handles whose control block is missing or stale. */
	if (podev == NULL || podev->magic != QCEDEV_MAGIC) {
		pr_err("%s: invalid handle %p\n",
			__func__, podev);
		return -ENOENT;
	}

	/* Verify user arguments. */
	if (_IOC_TYPE(cmd) != QCEDEV_IOC_MAGIC)
		return -ENOTTY;

	init_completion(&qcedev_areq.complete);
	pstat = &_qcedev_stat[podev->pdev->id];

	switch (cmd) {
	case QCEDEV_IOCTL_LOCK_CE:
		/* Lock/unlock only apply when the CE block is shared. */
		if (podev->platform_support.ce_shared)
			err = qcedev_lock_ce(podev);
		else
			err = -ENOTTY;
		break;
	case QCEDEV_IOCTL_UNLOCK_CE:
		if (podev->platform_support.ce_shared)
			err = qcedev_unlock_ce(podev);
		else
			err = -ENOTTY;
		break;
	case QCEDEV_IOCTL_ENC_REQ:
	case QCEDEV_IOCTL_DEC_REQ:
		/* One access_ok() check covers both the __copy_from_user
		 * and __copy_to_user calls on this ioctl argument. */
		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
				sizeof(struct qcedev_cipher_op_req)))
			return -EFAULT;

		if (__copy_from_user(&qcedev_areq.cipher_op_req,
				(void __user *)arg,
				sizeof(struct qcedev_cipher_op_req)))
			return -EFAULT;
		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_CIPHER;

		if (qcedev_check_cipher_params(&qcedev_areq.cipher_op_req,
				podev))
			return -EINVAL;

		/* Data may arrive either in pmem buffers or in plain
		 * user virtual buffers; dispatch accordingly. */
		if (qcedev_areq.cipher_op_req.use_pmem == QCEDEV_USE_PMEM)
			err = qcedev_pmem_ablk_cipher(&qcedev_areq, handle);
		else
			err = qcedev_vbuf_ablk_cipher(&qcedev_areq, handle);
		if (err)
			return err;
		if (__copy_to_user((void __user *)arg,
					&qcedev_areq.cipher_op_req,
					sizeof(struct qcedev_cipher_op_req)))
			return -EFAULT;
		break;

	case QCEDEV_IOCTL_SHA_INIT_REQ:

		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
				sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;

		if (__copy_from_user(&qcedev_areq.sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;
		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
			return -EINVAL;
		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
		err = qcedev_hash_init(&qcedev_areq, handle);
		if (err)
			return err;
		if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
					sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;
		break;
	case QCEDEV_IOCTL_GET_CMAC_REQ:
		if (!podev->ce_support.cmac)
			return -ENOTTY;
		/* fallthrough - CMAC shares the update path below */
	case QCEDEV_IOCTL_SHA_UPDATE_REQ:
		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
				sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;

		if (__copy_from_user(&qcedev_areq.sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;
		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
			return -EINVAL;
		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;

		if (qcedev_areq.sha_op_req.alg == QCEDEV_ALG_AES_CMAC) {
			err = qcedev_hash_cmac(&qcedev_areq, handle);
			if (err)
				return err;
		} else {
			err = qcedev_hash_update(&qcedev_areq, handle);
			if (err)
				return err;
		}

		/* Return the running digest to the caller. */
		memcpy(&qcedev_areq.sha_op_req.digest[0],
				&handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
					sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;
		break;

	case QCEDEV_IOCTL_SHA_FINAL_REQ:

		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
				sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;

		if (__copy_from_user(&qcedev_areq.sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;
		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
			return -EINVAL;
		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
		err = qcedev_hash_final(&qcedev_areq, handle);
		if (err)
			return err;
		qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen;
		memcpy(&qcedev_areq.sha_op_req.digest[0],
				&handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
					sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;
		break;

	case QCEDEV_IOCTL_GET_SHA_REQ:
		/* One-shot digest: init + update + final in one call. */

		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
				sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;

		if (__copy_from_user(&qcedev_areq.sha_op_req,
					(void __user *)arg,
					sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;
		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
			return -EINVAL;
		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
		/* NOTE(review): qcedev_hash_init() return value is ignored
		 * here, unlike every other call site - confirm intentional. */
		qcedev_hash_init(&qcedev_areq, handle);
		err = qcedev_hash_update(&qcedev_areq, handle);
		if (err)
			return err;
		err = qcedev_hash_final(&qcedev_areq, handle);
		if (err)
			return err;
		qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen;
		memcpy(&qcedev_areq.sha_op_req.digest[0],
				&handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
		if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
					sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;
		break;

	default:
		return -ENOTTY;
	}

	return err;
}
1966
1967static int qcedev_probe(struct platform_device *pdev)
1968{
1969 void *handle = NULL;
1970 int rc = 0;
1971 struct qcedev_control *podev;
1972 struct msm_ce_hw_support *platform_support;
1973
1974 if (pdev->id >= MAX_QCE_DEVICE) {
Ramesh Masavarapuc1d2b682011-09-07 14:57:58 -07001975 pr_err("%s: device id %d exceeds allowed %d\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001976 __func__, pdev->id, MAX_QCE_DEVICE);
1977 return -ENOENT;
1978 }
1979 podev = &qce_dev[pdev->id];
1980
1981 platform_support = (struct msm_ce_hw_support *)pdev->dev.platform_data;
1982 podev->platform_support.ce_shared = platform_support->ce_shared;
1983 podev->platform_support.shared_ce_resource =
1984 platform_support->shared_ce_resource;
1985 podev->platform_support.hw_key_support =
1986 platform_support->hw_key_support;
Mona Hossain650c22c2011-07-19 09:54:19 -07001987 podev->ce_lock_count = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001988 INIT_LIST_HEAD(&podev->ready_commands);
1989 podev->active_command = NULL;
1990
1991 spin_lock_init(&podev->lock);
1992
1993 tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev);
1994
1995 /* open qce */
1996 handle = qce_open(pdev, &rc);
1997 if (handle == NULL) {
1998 platform_set_drvdata(pdev, NULL);
1999 return rc;
2000 }
2001
2002 podev->qce = handle;
2003 podev->pdev = pdev;
2004 platform_set_drvdata(pdev, podev);
2005 qce_hw_support(podev->qce, &podev->ce_support);
2006 rc = misc_register(&podev->miscdevice);
2007
2008 if (rc >= 0)
2009 return 0;
2010
2011 if (handle)
2012 qce_close(handle);
2013 platform_set_drvdata(pdev, NULL);
2014 podev->qce = NULL;
2015 podev->pdev = NULL;
2016 return rc;
2017};
2018
2019static int qcedev_remove(struct platform_device *pdev)
2020{
2021 struct qcedev_control *podev;
2022
2023 podev = platform_get_drvdata(pdev);
2024 if (!podev)
2025 return 0;
2026 if (podev->qce)
2027 qce_close(podev->qce);
2028
2029 if (podev->miscdevice.minor != MISC_DYNAMIC_MINOR)
2030 misc_deregister(&podev->miscdevice);
2031 tasklet_kill(&podev->done_tasklet);
2032 return 0;
2033};
2034
/* Platform driver binding for the "qce" crypto engine device. */
static struct platform_driver qcedev_plat_driver = {
	.probe = qcedev_probe,
	.remove = qcedev_remove,
	.driver = {
		.name = "qce",
		.owner = THIS_MODULE,
	},
};
2043
2044static int _disp_stats(int id)
2045{
2046 struct qcedev_stat *pstat;
2047 int len = 0;
2048
2049 pstat = &_qcedev_stat[id];
2050 len = snprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
2051 "\nQualcomm QCE dev driver %d Statistics:\n",
2052 id + 1);
2053
2054 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
2055 " Encryption operation success : %d\n",
2056 pstat->qcedev_enc_success);
2057 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
2058 " Encryption operation fail : %d\n",
2059 pstat->qcedev_enc_fail);
2060 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
2061 " Decryption operation success : %d\n",
2062 pstat->qcedev_dec_success);
2063
2064 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
2065 " Encryption operation fail : %d\n",
2066 pstat->qcedev_dec_fail);
2067
2068 return len;
2069}
2070
/*
 * debugfs open: stash the per-device index (stored in i_private at
 * file creation time) so read/write can find their device.
 */
static int _debug_stats_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
2076
2077static ssize_t _debug_stats_read(struct file *file, char __user *buf,
2078 size_t count, loff_t *ppos)
2079{
2080 int rc = -EINVAL;
2081 int qcedev = *((int *) file->private_data);
2082 int len;
2083
2084 len = _disp_stats(qcedev);
2085
2086 rc = simple_read_from_buffer((void __user *) buf, len,
2087 ppos, (void *) _debug_read_buf, len);
2088
2089 return rc;
2090}
2091
2092static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
2093 size_t count, loff_t *ppos)
2094{
2095
2096 int qcedev = *((int *) file->private_data);
2097
2098 memset((char *)&_qcedev_stat[qcedev], 0, sizeof(struct qcedev_stat));
2099 return count;
2100};
2101
/* File operations for the per-device debugfs stats files. */
static const struct file_operations _debug_stats_ops = {
	.open = _debug_stats_open,
	.read = _debug_stats_read,
	.write = _debug_stats_write,
};
2107
2108static int _qcedev_debug_init(void)
2109{
2110 int rc;
2111 char name[DEBUG_MAX_FNAME];
2112 int i;
2113 struct dentry *dent;
2114
2115 _debug_dent = debugfs_create_dir("qcedev", NULL);
2116 if (IS_ERR(_debug_dent)) {
2117 pr_err("qcedev debugfs_create_dir fail, error %ld\n",
2118 PTR_ERR(_debug_dent));
2119 return PTR_ERR(_debug_dent);
2120 }
2121
2122 for (i = 0; i < MAX_QCE_DEVICE; i++) {
2123 snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", i+1);
2124 _debug_qcedev[i] = i;
2125 dent = debugfs_create_file(name, 0644, _debug_dent,
2126 &_debug_qcedev[i], &_debug_stats_ops);
2127 if (dent == NULL) {
2128 pr_err("qcedev debugfs_create_file fail, error %ld\n",
2129 PTR_ERR(dent));
2130 rc = PTR_ERR(dent);
2131 goto err;
2132 }
2133 }
2134 return 0;
2135err:
2136 debugfs_remove_recursive(_debug_dent);
2137 return rc;
2138}
2139
2140static int qcedev_init(void)
2141{
2142 int rc;
2143
2144 rc = _qcedev_debug_init();
2145 if (rc)
2146 return rc;
2147 return platform_driver_register(&qcedev_plat_driver);
2148}
2149
/* Module exit: remove the debugfs tree and unregister the driver. */
static void qcedev_exit(void)
{
	debugfs_remove_recursive(_debug_dent);
	platform_driver_unregister(&qcedev_plat_driver);
}
2155
/* Module metadata and entry points. */
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm DEV Crypto driver");
MODULE_VERSION("1.23");

module_init(qcedev_init);
module_exit(qcedev_exit);