/* Qualcomm CE device driver.
 *
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/mman.h>
#include <linux/android_pmem.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/hash.h>
#include <linux/platform_data/qcom_crypto_device.h>
#include <mach/scm.h>
#include <linux/qcedev.h>
#include "qce.h"

#define CACHE_LINE_SIZE 32
#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE

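/* standard initialization vector for SHA-1, source: FIPS 180-2 */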
static uint8_t _std_init_vector_sha1_uint8[] = {
	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
	0xC3, 0xD2, 0xE1, 0xF0
};
/* standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint8_t _std_init_vector_sha256_uint8[] = {
	0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
	0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
	0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
};

enum qcedev_crypto_oper_type {
	QCEDEV_CRYPTO_OPER_CIPHER = 0,
	QCEDEV_CRYPTO_OPER_SHA = 1,
	QCEDEV_CRYPTO_OPER_LAST
};

struct qcedev_handle;

struct qcedev_cipher_req {
	struct ablkcipher_request creq;
	void *cookie;
};

struct qcedev_sha_req {
	struct ahash_request sreq;
	void *cookie;
};

struct qcedev_sha_ctxt {
	uint32_t auth_data[4];
	uint8_t digest[QCEDEV_MAX_SHA_DIGEST];
	uint32_t diglen;
	uint8_t trailing_buf[64];
	uint32_t trailing_buf_len;
	uint8_t first_blk;
	uint8_t last_blk;
	uint8_t authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];
};

struct qcedev_async_req {
	struct list_head list;
	struct completion complete;
	enum qcedev_crypto_oper_type op_type;
	union {
		struct qcedev_cipher_op_req cipher_op_req;
		struct qcedev_sha_op_req sha_op_req;
	};
	union {
		struct qcedev_cipher_req cipher_req;
		struct qcedev_sha_req sha_req;
	};
	struct qcedev_handle *handle;
	int err;
};

static DEFINE_MUTEX(send_cmd_lock);

/**********************************************************************
 * Register ourselves as a misc device to be able to access the dev driver
 * from userspace. */

#define QCEDEV_DEV "qcedev"

struct qcedev_control {

	/* CE features supported by platform */
	struct msm_ce_hw_support platform_support;

	uint32_t ce_lock_count;

	/* CE features/algorithms supported by HW engine */
	struct ce_hw_support ce_support;

	/* misc device */
	struct miscdevice miscdevice;

	/* qce handle */
	void *qce;

	/* platform device */
	struct platform_device *pdev;

	unsigned magic;

	struct list_head ready_commands;
	struct qcedev_async_req *active_command;
	spinlock_t lock;
	struct tasklet_struct done_tasklet;
};

struct qcedev_handle {
	/* qcedev control handle */
	struct qcedev_control *cntl;
	/* qce internal sha context */
	struct qcedev_sha_ctxt sha_ctxt;
};

/*-------------------------------------------------------------------------
 * Resource Locking Service
 * ------------------------------------------------------------------------*/
#define QCEDEV_CMD_ID 1
#define QCEDEV_CE_LOCK_CMD 1
#define QCEDEV_CE_UNLOCK_CMD 0
#define NUM_RETRY 1000
#define CE_BUSY 55

static int qcedev_scm_cmd(int resource, int cmd, int *response)
{
#ifdef CONFIG_MSM_SCM

	struct {
		int resource;
		int cmd;
	} cmd_buf;

	cmd_buf.resource = resource;
	cmd_buf.cmd = cmd;

	return scm_call(SCM_SVC_TZ, QCEDEV_CMD_ID, &cmd_buf,
		sizeof(cmd_buf), response, sizeof(*response));

#else
	return 0;
#endif
}

static int qcedev_unlock_ce(struct qcedev_control *podev)
{
	int ret = 0;

	mutex_lock(&send_cmd_lock);
	if (podev->ce_lock_count == 1) {
		int response = 0;

		if (qcedev_scm_cmd(podev->platform_support.shared_ce_resource,
				QCEDEV_CE_UNLOCK_CMD, &response)) {
			pr_err("Failed to release CE lock\n");
			ret = -EIO;
		}
	}
	if (ret == 0) {
		if (podev->ce_lock_count)
			podev->ce_lock_count--;
		else {
			/* We should never be here */
			ret = -EIO;
			pr_err("CE hardware is already unlocked\n");
		}
	}
	mutex_unlock(&send_cmd_lock);

	return ret;
}

static int qcedev_lock_ce(struct qcedev_control *podev)
{
	int ret = 0;

	mutex_lock(&send_cmd_lock);
	if (podev->ce_lock_count == 0) {
		int response = -CE_BUSY;
		int i = 0;

		do {
			if (qcedev_scm_cmd(
				podev->platform_support.shared_ce_resource,
				QCEDEV_CE_LOCK_CMD, &response)) {
				response = -EINVAL;
				break;
			}
		} while ((response == -CE_BUSY) && (i++ < NUM_RETRY));

		if ((response == -CE_BUSY) && (i >= NUM_RETRY)) {
			ret = -EUSERS;
		} else {
			if (response < 0)
				ret = -EINVAL;
		}
	}
	if (ret == 0)
		podev->ce_lock_count++;
	mutex_unlock(&send_cmd_lock);
	return ret;
}

#define QCEDEV_MAGIC 0x56434544 /* "qced" */

static long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
static int qcedev_open(struct inode *inode, struct file *file);
static int qcedev_release(struct inode *inode, struct file *file);
static int start_cipher_req(struct qcedev_control *podev);
static int start_sha_req(struct qcedev_control *podev);

static const struct file_operations qcedev_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = qcedev_ioctl,
	.open = qcedev_open,
	.release = qcedev_release,
};

static struct qcedev_control qce_dev[] = {
	{
		.miscdevice = {
			.minor = MISC_DYNAMIC_MINOR,
			.name = "qce",
			.fops = &qcedev_fops,
		},
		.magic = QCEDEV_MAGIC,
	},
};

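/*
 * Userspace reaches this driver through the /dev/qce misc node registered
 * above: open() the node, fill in a qcedev_cipher_op_req or qcedev_sha_op_req
 * from <linux/qcedev.h>, and issue the matching ioctl. A minimal sketch; the
 * QCEDEV_IOCTL_* request codes are assumed to come from that UAPI header and
 * are not defined in this file:
 *
 *	int fd = open("/dev/qce", O_RDWR);
 *	struct qcedev_sha_op_req req = { .alg = QCEDEV_ALG_SHA256 };
 *	// fill req.data[], req.entries, req.data_len ...
 *	ioctl(fd, QCEDEV_IOCTL_SHA_INIT_REQ, &req);
 *	ioctl(fd, QCEDEV_IOCTL_SHA_UPDATE_REQ, &req);
 *	ioctl(fd, QCEDEV_IOCTL_SHA_FINAL_REQ, &req);
 */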
#define MAX_QCE_DEVICE ARRAY_SIZE(qce_dev)
#define DEBUG_MAX_FNAME 16
#define DEBUG_MAX_RW_BUF 1024

struct qcedev_stat {
	u32 qcedev_dec_success;
	u32 qcedev_dec_fail;
	u32 qcedev_enc_success;
	u32 qcedev_enc_fail;
	u32 qcedev_sha_success;
	u32 qcedev_sha_fail;
};

static struct qcedev_stat _qcedev_stat[MAX_QCE_DEVICE];
static struct dentry *_debug_dent;
static char _debug_read_buf[DEBUG_MAX_RW_BUF];
static int _debug_qcedev[MAX_QCE_DEVICE];

static struct qcedev_control *qcedev_minor_to_control(unsigned n)
{
	int i;

	for (i = 0; i < MAX_QCE_DEVICE; i++) {
		if (qce_dev[i].miscdevice.minor == n)
			return &qce_dev[i];
	}
	return NULL;
}

static int qcedev_open(struct inode *inode, struct file *file)
{
	struct qcedev_handle *handle;
	struct qcedev_control *podev;

	podev = qcedev_minor_to_control(MINOR(inode->i_rdev));
	if (podev == NULL) {
		printk(KERN_ERR "%s: no such device %d\n", __func__,
			MINOR(inode->i_rdev));
		return -ENOENT;
	}

	handle = kzalloc(sizeof(struct qcedev_handle), GFP_KERNEL);
	if (handle == NULL) {
		pr_err("Failed to allocate memory %ld\n",
			PTR_ERR(handle));
		return -ENOMEM;
	}

	handle->cntl = podev;
	file->private_data = handle;

	return 0;
}

static int qcedev_release(struct inode *inode, struct file *file)
{
	struct qcedev_control *podev;
	struct qcedev_handle *handle;

	handle = file->private_data;
	podev = handle->cntl;
	if (podev != NULL && podev->magic != QCEDEV_MAGIC) {
		printk(KERN_ERR "%s: invalid handle %p\n",
			__func__, podev);
	}
	kzfree(handle);
	file->private_data = NULL;

	return 0;
}

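/*
 * Tasklet handler: complete the request that just finished on the CE engine
 * and, while the ready_commands list is not empty, start the next queued
 * request. A request whose start fails is completed immediately and the loop
 * moves on to the one after it.
 */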
static void req_done(unsigned long data)
{
	struct qcedev_control *podev = (struct qcedev_control *)data;
	struct qcedev_async_req *areq;
	unsigned long flags = 0;
	struct qcedev_async_req *new_req = NULL;
	int ret = 0;

	spin_lock_irqsave(&podev->lock, flags);
	areq = podev->active_command;
	podev->active_command = NULL;

again:
	if (!list_empty(&podev->ready_commands)) {
		new_req = container_of(podev->ready_commands.next,
					struct qcedev_async_req, list);
		list_del(&new_req->list);
		podev->active_command = new_req;
		new_req->err = 0;
		if (new_req->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
			ret = start_cipher_req(podev);
		else
			ret = start_sha_req(podev);
	}

	spin_unlock_irqrestore(&podev->lock, flags);

	if (areq)
		complete(&areq->complete);

	if (new_req && ret) {
		complete(&new_req->complete);
		spin_lock_irqsave(&podev->lock, flags);
		podev->active_command = NULL;
		areq = NULL;
		ret = 0;
		new_req = NULL;
		goto again;
	}

	return;
}

static void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
	unsigned char *authdata, int ret)
{
	struct qcedev_sha_req *areq;
	struct qcedev_control *pdev;
	struct qcedev_handle *handle;

	uint32_t *auth32 = (uint32_t *)authdata;

	areq = (struct qcedev_sha_req *) cookie;
	handle = (struct qcedev_handle *) areq->cookie;
	pdev = handle->cntl;

	if (digest)
		memcpy(&handle->sha_ctxt.digest[0], digest, 32);

	if (authdata) {
		handle->sha_ctxt.auth_data[0] = auth32[0];
		handle->sha_ctxt.auth_data[1] = auth32[1];
		handle->sha_ctxt.auth_data[2] = auth32[2];
		handle->sha_ctxt.auth_data[3] = auth32[3];
	}

	tasklet_schedule(&pdev->done_tasklet);
};


static void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
	unsigned char *iv, int ret)
{
	struct qcedev_cipher_req *areq;
	struct qcedev_handle *handle;
	struct qcedev_control *podev;
	struct qcedev_async_req *qcedev_areq;

	areq = (struct qcedev_cipher_req *) cookie;
	handle = (struct qcedev_handle *) areq->cookie;
	podev = handle->cntl;
	qcedev_areq = podev->active_command;

	if (iv)
		memcpy(&qcedev_areq->cipher_op_req.iv[0], iv,
			qcedev_areq->cipher_op_req.ivlen);
	tasklet_schedule(&podev->done_tasklet);
};

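/*
 * Translate the user-supplied cipher request held in podev->active_command
 * into a struct qce_req (algorithm, mode, direction, key, IV) and hand it to
 * the QCE back-end via qce_ablk_cipher_req(). Called with podev->lock held.
 */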
static int start_cipher_req(struct qcedev_control *podev)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_req creq;
	int ret = 0;

	/* start the command on the podev->active_command */
	qcedev_areq = podev->active_command;

	qcedev_areq->cipher_req.cookie = qcedev_areq->handle;
	creq.use_pmem = qcedev_areq->cipher_op_req.use_pmem;
	if (qcedev_areq->cipher_op_req.use_pmem == QCEDEV_USE_PMEM)
		creq.pmem = &qcedev_areq->cipher_op_req.pmem;
	else
		creq.pmem = NULL;

	switch (qcedev_areq->cipher_op_req.alg) {
	case QCEDEV_ALG_DES:
		creq.alg = CIPHER_ALG_DES;
		break;
	case QCEDEV_ALG_3DES:
		creq.alg = CIPHER_ALG_3DES;
		break;
	case QCEDEV_ALG_AES:
		creq.alg = CIPHER_ALG_AES;
		break;
	default:
		break;
	};

	switch (qcedev_areq->cipher_op_req.mode) {
	case QCEDEV_AES_MODE_CBC:
	case QCEDEV_DES_MODE_CBC:
		creq.mode = QCE_MODE_CBC;
		break;
	case QCEDEV_AES_MODE_ECB:
	case QCEDEV_DES_MODE_ECB:
		creq.mode = QCE_MODE_ECB;
		break;
	case QCEDEV_AES_MODE_CTR:
		creq.mode = QCE_MODE_CTR;
		break;
	case QCEDEV_AES_MODE_XTS:
		creq.mode = QCE_MODE_XTS;
		break;
	default:
		break;
	};

	if ((creq.alg == CIPHER_ALG_AES) &&
		(creq.mode == QCE_MODE_CTR)) {
		creq.dir = QCE_ENCRYPT;
	} else {
		if (QCEDEV_OPER_ENC == qcedev_areq->cipher_op_req.op)
			creq.dir = QCE_ENCRYPT;
		else
			creq.dir = QCE_DECRYPT;
	}

	creq.iv = &qcedev_areq->cipher_op_req.iv[0];
	creq.ivsize = qcedev_areq->cipher_op_req.ivlen;

	creq.enckey = &qcedev_areq->cipher_op_req.enckey[0];
	creq.encklen = qcedev_areq->cipher_op_req.encklen;

	creq.cryptlen = qcedev_areq->cipher_op_req.data_len;

	if (qcedev_areq->cipher_op_req.encklen == 0) {
		if ((qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC_NO_KEY)
			|| (qcedev_areq->cipher_op_req.op ==
				QCEDEV_OPER_DEC_NO_KEY))
			creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
		else {
			int i;

			for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
				if (qcedev_areq->cipher_op_req.enckey[i] != 0)
					break;
			}

			if ((podev->platform_support.hw_key_support == 1) &&
						(i == QCEDEV_MAX_KEY_SIZE))
				creq.op = QCE_REQ_ABLK_CIPHER;
			else {
				ret = -EINVAL;
				goto unsupported;
			}
		}
	} else {
		creq.op = QCE_REQ_ABLK_CIPHER;
	}

	creq.qce_cb = qcedev_cipher_req_cb;
	creq.areq = (void *)&qcedev_areq->cipher_req;

	ret = qce_ablk_cipher_req(podev->qce, &creq);
unsupported:
	if (ret)
		qcedev_areq->err = -ENXIO;
	else
		qcedev_areq->err = 0;
	return ret;
};

static int start_sha_req(struct qcedev_control *podev)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_sha_req sreq;
	int ret = 0;
	struct qcedev_handle *handle;

	/* start the command on the podev->active_command */
	qcedev_areq = podev->active_command;
	handle = qcedev_areq->handle;

	switch (qcedev_areq->sha_op_req.alg) {
	case QCEDEV_ALG_SHA1:
		sreq.alg = QCE_HASH_SHA1;
		break;
	case QCEDEV_ALG_SHA256:
		sreq.alg = QCE_HASH_SHA256;
		break;
	case QCEDEV_ALG_SHA1_HMAC:
		if (podev->ce_support.sha_hmac) {
			sreq.alg = QCE_HASH_SHA1_HMAC;
			sreq.authkey = &handle->sha_ctxt.authkey[0];
		} else {
			sreq.alg = QCE_HASH_SHA1;
			sreq.authkey = NULL;
		}
		break;
	case QCEDEV_ALG_SHA256_HMAC:
		if (podev->ce_support.sha_hmac) {
			sreq.alg = QCE_HASH_SHA256_HMAC;
			sreq.authkey = &handle->sha_ctxt.authkey[0];
		} else {
			sreq.alg = QCE_HASH_SHA256;
			sreq.authkey = NULL;
		}
		break;
	case QCEDEV_ALG_AES_CMAC:
		sreq.alg = QCE_HASH_AES_CMAC;
		sreq.authkey = &handle->sha_ctxt.authkey[0];
		sreq.authklen = qcedev_areq->sha_op_req.authklen;
		break;
	default:
		break;
	};

	qcedev_areq->sha_req.cookie = handle;

	sreq.qce_cb = qcedev_sha_req_cb;
	if (qcedev_areq->sha_op_req.alg != QCEDEV_ALG_AES_CMAC) {
		sreq.auth_data[0] = handle->sha_ctxt.auth_data[0];
		sreq.auth_data[1] = handle->sha_ctxt.auth_data[1];
		sreq.auth_data[2] = handle->sha_ctxt.auth_data[2];
		sreq.auth_data[3] = handle->sha_ctxt.auth_data[3];
		sreq.digest = &handle->sha_ctxt.digest[0];
		sreq.first_blk = handle->sha_ctxt.first_blk;
		sreq.last_blk = handle->sha_ctxt.last_blk;
	}
	sreq.size = qcedev_areq->sha_req.sreq.nbytes;
	sreq.src = qcedev_areq->sha_req.sreq.src;
	sreq.areq = (void *)&qcedev_areq->sha_req;

	ret = qce_process_sha_req(podev->qce, &sreq);

	if (ret)
		qcedev_areq->err = -ENXIO;
	else
		qcedev_areq->err = 0;
	return ret;
};

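/*
 * Queue an async request on the control structure: if the CE engine is idle
 * the request is started immediately, otherwise it is appended to
 * ready_commands and picked up later by req_done(). The caller sleeps on the
 * request's completion. When the CE block is shared with the secure world,
 * the hardware is locked/unlocked around the operation via SCM calls, and
 * the per-device statistics are updated before returning.
 */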
static int submit_req(struct qcedev_async_req *qcedev_areq,
					struct qcedev_handle *handle)
{
	struct qcedev_control *podev;
	unsigned long flags = 0;
	int ret = 0;
	struct qcedev_stat *pstat;

	qcedev_areq->err = 0;
	podev = handle->cntl;

	if (podev->platform_support.ce_shared) {
		ret = qcedev_lock_ce(podev);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&podev->lock, flags);

	if (podev->active_command == NULL) {
		podev->active_command = qcedev_areq;
		if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
			ret = start_cipher_req(podev);
		else
			ret = start_sha_req(podev);
	} else {
		list_add_tail(&qcedev_areq->list, &podev->ready_commands);
	}

	if (ret != 0)
		podev->active_command = NULL;

	spin_unlock_irqrestore(&podev->lock, flags);

	if (ret == 0)
		wait_for_completion(&qcedev_areq->complete);

	if (podev->platform_support.ce_shared)
		ret = qcedev_unlock_ce(podev);

	if (ret)
		qcedev_areq->err = -EIO;

	pstat = &_qcedev_stat[podev->pdev->id];
	if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) {
		switch (qcedev_areq->cipher_op_req.op) {
		case QCEDEV_OPER_DEC:
			if (qcedev_areq->err)
				pstat->qcedev_dec_fail++;
			else
				pstat->qcedev_dec_success++;
			break;
		case QCEDEV_OPER_ENC:
			if (qcedev_areq->err)
				pstat->qcedev_enc_fail++;
			else
				pstat->qcedev_enc_success++;
			break;
		default:
			break;
		};
	} else {
		if (qcedev_areq->err)
			pstat->qcedev_sha_fail++;
		else
			pstat->qcedev_sha_success++;
	}

	return qcedev_areq->err;
}

static int qcedev_sha_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	struct qcedev_sha_ctxt *sha_ctxt = &handle->sha_ctxt;

	memset(sha_ctxt, 0, sizeof(struct qcedev_sha_ctxt));
	sha_ctxt->first_blk = 1;

	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)) {
		memcpy(&sha_ctxt->digest[0],
			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
		sha_ctxt->diglen = SHA1_DIGEST_SIZE;
	} else {
		if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA256) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)) {
			memcpy(&sha_ctxt->digest[0],
				&_std_init_vector_sha256_uint8[0],
				SHA256_DIGEST_SIZE);
			sha_ctxt->diglen = SHA256_DIGEST_SIZE;
		}
	}
	return 0;
}

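/*
 * Hash at most QCE_MAX_OPER_DATA bytes in one pass. Data left over from the
 * previous update (sha_ctxt.trailing_buf) is prepended to the user buffers
 * in a cache-line-aligned kernel copy; anything that does not fill a whole
 * SHA block is held back in trailing_buf for the next update or for final.
 */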
static int qcedev_sha_update_max_xfer(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	int i = 0;
	struct scatterlist sg_src[2];
	uint32_t total;

	uint8_t *user_src = NULL;
	uint8_t *k_src = NULL;
	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;

	uint32_t sha_pad_len = 0;
	uint32_t trailing_buf_len = 0;
	uint32_t t_buf = handle->sha_ctxt.trailing_buf_len;
	uint32_t sha_block_size;

	total = qcedev_areq->sha_op_req.data_len + t_buf;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1)
		sha_block_size = SHA1_BLOCK_SIZE;
	else
		sha_block_size = SHA256_BLOCK_SIZE;

	if (total <= sha_block_size) {
		uint32_t len = qcedev_areq->sha_op_req.data_len;

		i = 0;

		k_src = &handle->sha_ctxt.trailing_buf[t_buf];

		/* Copy data from user src(s) */
		while (len > 0) {
			user_src =
			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
			if (user_src && __copy_from_user(k_src,
				(void __user *)user_src,
				qcedev_areq->sha_op_req.data[i].len))
				return -EFAULT;

			len -= qcedev_areq->sha_op_req.data[i].len;
			k_src += qcedev_areq->sha_op_req.data[i].len;
			i++;
		}
		handle->sha_ctxt.trailing_buf_len = total;

		return 0;
	}


	k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
				GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;

	k_align_src = (uint8_t *) ALIGN(((unsigned int)k_buf_src),
						CACHE_LINE_SIZE);
	k_src = k_align_src;

	/* check for trailing buffer from previous updates and append it */
	if (t_buf > 0) {
		memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
								t_buf);
		k_src += t_buf;
	}

	/* Copy data from user src(s) */
	user_src = (void __user *)qcedev_areq->sha_op_req.data[0].vaddr;
	if (user_src && __copy_from_user(k_src,
				(void __user *)user_src,
				qcedev_areq->sha_op_req.data[0].len)) {
		kfree(k_buf_src);
		return -EFAULT;
	}
	k_src += qcedev_areq->sha_op_req.data[0].len;
	for (i = 1; i < qcedev_areq->sha_op_req.entries; i++) {
		user_src = (void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
		if (user_src && __copy_from_user(k_src,
					(void __user *)user_src,
					qcedev_areq->sha_op_req.data[i].len)) {
			kfree(k_buf_src);
			return -EFAULT;
		}
		k_src += qcedev_areq->sha_op_req.data[i].len;
	}

	/* get new trailing buffer */
	sha_pad_len = ALIGN(total, CE_SHA_BLOCK_SIZE) - total;
	trailing_buf_len = CE_SHA_BLOCK_SIZE - sha_pad_len;

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src[0];
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src,
						total - trailing_buf_len);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = total - trailing_buf_len;

	/* update sha_ctxt trailing buf content to new trailing buf */
	if (trailing_buf_len > 0) {
		memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
		memcpy(&handle->sha_ctxt.trailing_buf[0],
					(k_src - trailing_buf_len),
					trailing_buf_len);
	}
	handle->sha_ctxt.trailing_buf_len = trailing_buf_len;

	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.first_blk = 0;

	kfree(k_buf_src);
	return err;
}

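/*
 * Walk a hash-update request that may exceed QCE_MAX_OPER_DATA (32 KB): the
 * source entries are re-packed so that each call into
 * qcedev_sha_update_max_xfer() transfers at most QCE_MAX_OPER_DATA bytes,
 * and the caller's qcedev_sha_op_req is restored before returning.
 */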
static int qcedev_sha_update(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	int i = 0;
	int j = 0;
	int k = 0;
	int num_entries = 0;
	uint32_t total = 0;

	/* verify address src(s) */
	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++)
		if (!access_ok(VERIFY_READ,
			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr,
			qcedev_areq->sha_op_req.data[i].len))
			return -EFAULT;

	if (qcedev_areq->sha_op_req.data_len > QCE_MAX_OPER_DATA) {

		struct qcedev_sha_op_req *saved_req;
		struct qcedev_sha_op_req req;
		struct qcedev_sha_op_req *sreq = &qcedev_areq->sha_op_req;

		/* save the original req structure */
		saved_req =
			kmalloc(sizeof(struct qcedev_sha_op_req), GFP_KERNEL);
		if (saved_req == NULL) {
			printk(KERN_ERR "%s:Can't Allocate mem:saved_req %x\n",
					__func__, (uint32_t)saved_req);
			return -ENOMEM;
		}
		memcpy(&req, sreq, sizeof(struct qcedev_sha_op_req));
		memcpy(saved_req, sreq, sizeof(struct qcedev_sha_op_req));

		i = 0;
		/* Address 32 KB at a time */
		while ((i < req.entries) && (err == 0)) {
			if (sreq->data[i].len > QCE_MAX_OPER_DATA) {
				sreq->data[0].len = QCE_MAX_OPER_DATA;
				if (i > 0) {
					sreq->data[0].vaddr =
							sreq->data[i].vaddr;
				}

				sreq->data_len = QCE_MAX_OPER_DATA;
				sreq->entries = 1;

				err = qcedev_sha_update_max_xfer(qcedev_areq,
									handle);

				sreq->data[i].len = req.data[i].len -
							QCE_MAX_OPER_DATA;
				sreq->data[i].vaddr = req.data[i].vaddr +
							QCE_MAX_OPER_DATA;
				req.data[i].vaddr = sreq->data[i].vaddr;
				req.data[i].len = sreq->data[i].len;
			} else {
				total = 0;
				for (j = i; j < req.entries; j++) {
					num_entries++;
					if ((total + sreq->data[j].len) >=
							QCE_MAX_OPER_DATA) {
						sreq->data[j].len =
						(QCE_MAX_OPER_DATA - total);
						total = QCE_MAX_OPER_DATA;
						break;
					}
					total += sreq->data[j].len;
				}

				sreq->data_len = total;
				if (i > 0)
					for (k = 0; k < num_entries; k++) {
						sreq->data[k].len =
							sreq->data[i+k].len;
						sreq->data[k].vaddr =
							sreq->data[i+k].vaddr;
					}
				sreq->entries = num_entries;

				i = j;
				err = qcedev_sha_update_max_xfer(qcedev_areq,
									handle);
				num_entries = 0;

				sreq->data[i].vaddr = req.data[i].vaddr +
							sreq->data[i].len;
				sreq->data[i].len = req.data[i].len -
							sreq->data[i].len;
				req.data[i].vaddr = sreq->data[i].vaddr;
				req.data[i].len = sreq->data[i].len;

				if (sreq->data[i].len == 0)
					i++;
			}
		} /* end of while ((i < req.entries) && (err == 0)) */

		/* Restore the original req structure */
		for (i = 0; i < saved_req->entries; i++) {
			sreq->data[i].len = saved_req->data[i].len;
			sreq->data[i].vaddr = saved_req->data[i].vaddr;
		}
		sreq->entries = saved_req->entries;
		sreq->data_len = saved_req->data_len;
		kfree(saved_req);
	} else
		err = qcedev_sha_update_max_xfer(qcedev_areq, handle);

	return err;
}

static int qcedev_sha_final(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	struct scatterlist sg_src;
	uint32_t total;

	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;

	handle->sha_ctxt.first_blk = 0;
	handle->sha_ctxt.last_blk = 1;

	total = handle->sha_ctxt.trailing_buf_len;

	if (total) {
		k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
					GFP_KERNEL);
		if (k_buf_src == NULL)
			return -ENOMEM;

		k_align_src = (uint8_t *) ALIGN(((unsigned int)k_buf_src),
						CACHE_LINE_SIZE);
		memcpy(k_align_src, &handle->sha_ctxt.trailing_buf[0], total);
	}
	handle->sha_ctxt.last_blk = 1;
	handle->sha_ctxt.first_blk = 0;

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src, total);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = total;

	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.first_blk = 0;
	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.auth_data[0] = 0;
	handle->sha_ctxt.auth_data[1] = 0;
	handle->sha_ctxt.trailing_buf_len = 0;
	memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);

	kfree(k_buf_src);
	return err;
}

static int qcedev_hash_cmac(struct qcedev_async_req *qcedev_areq,
					struct qcedev_handle *handle)
{
	int err = 0;
	int i = 0;
	struct scatterlist sg_src[2];
	uint32_t total;

	uint8_t *user_src = NULL;
	uint8_t *k_src = NULL;
	uint8_t *k_buf_src = NULL;

	total = qcedev_areq->sha_op_req.data_len;

	/* verify address src(s) */
	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++)
		if (!access_ok(VERIFY_READ,
			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr,
			qcedev_areq->sha_op_req.data[i].len))
			return -EFAULT;

	/* Verify Source Address */
	if (!access_ok(VERIFY_READ,
			(void __user *)qcedev_areq->sha_op_req.authkey,
			qcedev_areq->sha_op_req.authklen))
		return -EFAULT;
	if (__copy_from_user(&handle->sha_ctxt.authkey[0],
			(void __user *)qcedev_areq->sha_op_req.authkey,
			qcedev_areq->sha_op_req.authklen))
		return -EFAULT;


	k_buf_src = kmalloc(total, GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;

	k_src = k_buf_src;

	/* Copy data from user src(s) */
	user_src = (void __user *)qcedev_areq->sha_op_req.data[0].vaddr;
	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++) {
		user_src =
			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
		if (user_src && __copy_from_user(k_src, (void __user *)user_src,
				qcedev_areq->sha_op_req.data[i].len)) {
			kfree(k_buf_src);
			return -EFAULT;
		}
		k_src += qcedev_areq->sha_op_req.data[i].len;
	}

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src[0];
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_buf_src, total);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = total;
	handle->sha_ctxt.diglen = qcedev_areq->sha_op_req.diglen;
	err = submit_req(qcedev_areq, handle);

	kfree(k_buf_src);
	return err;
}

static int qcedev_set_hmac_auth_key(struct qcedev_async_req *areq,
					struct qcedev_handle *handle)
{
	int err = 0;

	if (areq->sha_op_req.authklen <= QCEDEV_MAX_KEY_SIZE) {
		/* Verify Source Address */
		if (!access_ok(VERIFY_READ,
				(void __user *)areq->sha_op_req.authkey,
				areq->sha_op_req.authklen))
			return -EFAULT;
		if (__copy_from_user(&handle->sha_ctxt.authkey[0],
				(void __user *)areq->sha_op_req.authkey,
				areq->sha_op_req.authklen))
			return -EFAULT;
	} else {
		struct qcedev_async_req authkey_areq;

		init_completion(&authkey_areq.complete);

		authkey_areq.sha_op_req.entries = 1;
		authkey_areq.sha_op_req.data[0].vaddr =
						areq->sha_op_req.authkey;
		authkey_areq.sha_op_req.data[0].len = areq->sha_op_req.authklen;
		authkey_areq.sha_op_req.data_len = areq->sha_op_req.authklen;
		authkey_areq.sha_op_req.diglen = 0;
		memset(&authkey_areq.sha_op_req.digest[0], 0,
						QCEDEV_MAX_SHA_DIGEST);
		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA1;
		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)
			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA256;

		authkey_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;

		qcedev_sha_init(&authkey_areq, handle);
		err = qcedev_sha_update(&authkey_areq, handle);
		if (!err)
			err = qcedev_sha_final(&authkey_areq, handle);
		else
			return err;
		memcpy(&handle->sha_ctxt.authkey[0],
				&handle->sha_ctxt.digest[0],
				handle->sha_ctxt.diglen);
	}
	return err;
}

static int qcedev_hmac_get_ohash(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	int err = 0;
	struct scatterlist sg_src;
	uint8_t *k_src = NULL;
	uint32_t sha_block_size = 0;
	uint32_t sha_digest_size = 0;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
		sha_digest_size = SHA1_DIGEST_SIZE;
		sha_block_size = SHA1_BLOCK_SIZE;
	} else {
		if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
			sha_digest_size = SHA256_DIGEST_SIZE;
			sha_block_size = SHA256_BLOCK_SIZE;
		}
	}
	k_src = kmalloc(sha_block_size, GFP_KERNEL);
	if (k_src == NULL)
		return -ENOMEM;

	/* check for trailing buffer from previous updates and append it */
	memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
			handle->sha_ctxt.trailing_buf_len);

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_src, sha_block_size);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = sha_block_size;
	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
	memcpy(&handle->sha_ctxt.trailing_buf[0], &handle->sha_ctxt.digest[0],
						sha_digest_size);
	handle->sha_ctxt.trailing_buf_len = sha_digest_size;

	handle->sha_ctxt.first_blk = 1;
	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.auth_data[0] = 0;
	handle->sha_ctxt.auth_data[1] = 0;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
		memcpy(&handle->sha_ctxt.digest[0],
			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
		handle->sha_ctxt.diglen = SHA1_DIGEST_SIZE;
	}

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
		memcpy(&handle->sha_ctxt.digest[0],
			&_std_init_vector_sha256_uint8[0], SHA256_DIGEST_SIZE);
		handle->sha_ctxt.diglen = SHA256_DIGEST_SIZE;
	}
	err = submit_req(qcedev_areq, handle);

	handle->sha_ctxt.last_blk = 0;
	handle->sha_ctxt.first_blk = 0;

	kfree(k_src);
	return err;
}

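/*
 * Software HMAC fallback for CE hardware without native HMAC support: XOR
 * the stored authentication key with the HMAC ipad (0x36) or opad (0x5c)
 * constant and stage the result in trailing_buf as the first block of the
 * inner or outer hash (see RFC 2104).
 */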
static int qcedev_hmac_update_iokey(struct qcedev_async_req *areq,
				struct qcedev_handle *handle, bool ikey)
{
	int i;
	uint32_t constant;
	uint32_t sha_block_size;

	if (ikey)
		constant = 0x36;
	else
		constant = 0x5c;

	if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
		sha_block_size = SHA1_BLOCK_SIZE;
	else
		sha_block_size = SHA256_BLOCK_SIZE;

	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
	for (i = 0; i < sha_block_size; i++)
		handle->sha_ctxt.trailing_buf[i] =
				(handle->sha_ctxt.authkey[i] ^ constant);

	handle->sha_ctxt.trailing_buf_len = sha_block_size;
	return 0;
}

static int qcedev_hmac_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	int err;
	struct qcedev_control *podev = handle->cntl;

	qcedev_sha_init(areq, handle);
	err = qcedev_set_hmac_auth_key(areq, handle);
	if (err)
		return err;
	if (!podev->ce_support.sha_hmac)
		qcedev_hmac_update_iokey(areq, handle, true);
	return 0;
}

static int qcedev_hmac_final(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	int err;
	struct qcedev_control *podev = handle->cntl;

	err = qcedev_sha_final(areq, handle);
	if (podev->ce_support.sha_hmac)
		return err;

	qcedev_hmac_update_iokey(areq, handle, false);
	err = qcedev_hmac_get_ohash(areq, handle);
	if (err)
		return err;
	err = qcedev_sha_final(areq, handle);

	return err;
}

static int qcedev_hash_init(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
		return qcedev_sha_init(areq, handle);
	else
		return qcedev_hmac_init(areq, handle);
}

static int qcedev_hash_update(struct qcedev_async_req *qcedev_areq,
				struct qcedev_handle *handle)
{
	return qcedev_sha_update(qcedev_areq, handle);
}

static int qcedev_hash_final(struct qcedev_async_req *areq,
				struct qcedev_handle *handle)
{
	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
		return qcedev_sha_final(areq, handle);
	else
		return qcedev_hmac_final(areq, handle);
}

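/*
 * PMEM path: source and destination are physically contiguous PMEM regions
 * identified by file descriptors. The buffers are mapped with
 * get_pmem_file(), described to the CE engine as scatterlists built from the
 * caller's offsets, and released with put_pmem_file() when the transfer is
 * done. For in-place operations the destination reuses the source list.
 */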
static int qcedev_pmem_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
						struct qcedev_handle *handle)
{
	int i = 0;
	int err = 0;
	struct scatterlist *sg_src = NULL;
	struct scatterlist *sg_dst = NULL;
	struct scatterlist *sg_ndex = NULL;
	struct file *file_src = NULL;
	struct file *file_dst = NULL;
	unsigned long paddr;
	unsigned long kvaddr;
	unsigned long len;

	sg_src = kmalloc((sizeof(struct scatterlist) *
				areq->cipher_op_req.entries), GFP_KERNEL);
	if (sg_src == NULL) {
		printk(KERN_ERR "%s: Can't Allocate memory: sg_src 0x%x\n",
			__func__, (uint32_t)sg_src);
		return -ENOMEM;

	}
	memset(sg_src, 0, (sizeof(struct scatterlist) *
				areq->cipher_op_req.entries));
	sg_ndex = sg_src;
	areq->cipher_req.creq.src = sg_src;

	/* address src */
	get_pmem_file(areq->cipher_op_req.pmem.fd_src, &paddr,
					&kvaddr, &len, &file_src);

	for (i = 0; i < areq->cipher_op_req.entries; i++) {
		sg_set_buf(sg_ndex,
		((uint8_t *)(areq->cipher_op_req.pmem.src[i].offset) + kvaddr),
		areq->cipher_op_req.pmem.src[i].len);
		sg_ndex++;
	}
	sg_mark_end(--sg_ndex);

	for (i = 0; i < areq->cipher_op_req.entries; i++)
		areq->cipher_op_req.pmem.src[i].offset += (uint32_t)paddr;

	/* address dst */
	/* If not in-place encryption/decryption */
	if (areq->cipher_op_req.in_place_op != 1) {
		sg_dst = kmalloc((sizeof(struct scatterlist) *
				areq->cipher_op_req.entries), GFP_KERNEL);
		if (sg_dst == NULL)
			return -ENOMEM;
		memset(sg_dst, 0, (sizeof(struct scatterlist) *
					areq->cipher_op_req.entries));
		areq->cipher_req.creq.dst = sg_dst;
		sg_ndex = sg_dst;

		get_pmem_file(areq->cipher_op_req.pmem.fd_dst, &paddr,
					&kvaddr, &len, &file_dst);
		for (i = 0; i < areq->cipher_op_req.entries; i++)
			sg_set_buf(sg_ndex++,
			((uint8_t *)(areq->cipher_op_req.pmem.dst[i].offset)
			+ kvaddr), areq->cipher_op_req.pmem.dst[i].len);
		sg_mark_end(--sg_ndex);

		for (i = 0; i < areq->cipher_op_req.entries; i++)
			areq->cipher_op_req.pmem.dst[i].offset +=
							(uint32_t)paddr;
	} else {
		areq->cipher_req.creq.dst = sg_src;
		for (i = 0; i < areq->cipher_op_req.entries; i++) {
			areq->cipher_op_req.pmem.dst[i].offset =
				areq->cipher_op_req.pmem.src[i].offset;
			areq->cipher_op_req.pmem.dst[i].len =
				areq->cipher_op_req.pmem.src[i].len;
		}
	}

	areq->cipher_req.creq.nbytes = areq->cipher_op_req.data_len;
	areq->cipher_req.creq.info = areq->cipher_op_req.iv;

	err = submit_req(areq, handle);

	kfree(sg_src);
	kfree(sg_dst);

	if (file_dst)
		put_pmem_file(file_dst);
	if (file_src)
		put_pmem_file(file_src);

	return err;
};


static int qcedev_pmem_ablk_cipher(struct qcedev_async_req *qcedev_areq,
						struct qcedev_handle *handle)
{
	int err = 0;
	int i = 0;
	int j = 0;
	int k = 0;
	int num_entries = 0;
	uint32_t total = 0;
	struct qcedev_cipher_op_req *saved_req;
	struct qcedev_cipher_op_req *creq = &qcedev_areq->cipher_op_req;

	saved_req = kmalloc(sizeof(struct qcedev_cipher_op_req), GFP_KERNEL);
	if (saved_req == NULL) {
		printk(KERN_ERR "%s:Can't Allocate mem:saved_req %x\n",
			__func__, (uint32_t)saved_req);
		return -ENOMEM;
	}
	memcpy(saved_req, creq, sizeof(struct qcedev_cipher_op_req));

	if (qcedev_areq->cipher_op_req.data_len > QCE_MAX_OPER_DATA) {

		struct qcedev_cipher_op_req req;

		/* save the original req structure */
		memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));

		i = 0;
		/* Address 32 KB at a time */
		while ((i < req.entries) && (err == 0)) {
			if (creq->pmem.src[i].len > QCE_MAX_OPER_DATA) {
				creq->pmem.src[0].len = QCE_MAX_OPER_DATA;
				if (i > 0) {
					creq->pmem.src[0].offset =
						creq->pmem.src[i].offset;
				}

				creq->data_len = QCE_MAX_OPER_DATA;
				creq->entries = 1;

				err =
				qcedev_pmem_ablk_cipher_max_xfer(qcedev_areq,
								handle);

				creq->pmem.src[i].len = req.pmem.src[i].len -
							QCE_MAX_OPER_DATA;
				creq->pmem.src[i].offset =
						req.pmem.src[i].offset +
						QCE_MAX_OPER_DATA;
				req.pmem.src[i].offset =
						creq->pmem.src[i].offset;
				req.pmem.src[i].len = creq->pmem.src[i].len;
			} else {
				total = 0;
				for (j = i; j < req.entries; j++) {
					num_entries++;
					if ((total + creq->pmem.src[j].len)
							>= QCE_MAX_OPER_DATA) {
						creq->pmem.src[j].len =
						QCE_MAX_OPER_DATA - total;
						total = QCE_MAX_OPER_DATA;
						break;
					}
					total += creq->pmem.src[j].len;
				}

				creq->data_len = total;
				if (i > 0)
					for (k = 0; k < num_entries; k++) {
						creq->pmem.src[k].len =
						creq->pmem.src[i+k].len;
						creq->pmem.src[k].offset =
						creq->pmem.src[i+k].offset;
					}
				creq->entries = num_entries;

				i = j;
				err =
				qcedev_pmem_ablk_cipher_max_xfer(qcedev_areq,
								handle);
				num_entries = 0;

				creq->pmem.src[i].offset =
						req.pmem.src[i].offset +
						creq->pmem.src[i].len;
				creq->pmem.src[i].len =
						req.pmem.src[i].len -
						creq->pmem.src[i].len;
				req.pmem.src[i].offset =
						creq->pmem.src[i].offset;
				req.pmem.src[i].len =
						creq->pmem.src[i].len;

				if (creq->pmem.src[i].len == 0)
					i++;
			}

		} /* end of while ((i < req.entries) && (err == 0)) */

	} else
		err = qcedev_pmem_ablk_cipher_max_xfer(qcedev_areq, handle);

	/* Restore the original req structure */
	for (i = 0; i < saved_req->entries; i++) {
		creq->pmem.src[i].len = saved_req->pmem.src[i].len;
		creq->pmem.src[i].offset = saved_req->pmem.src[i].offset;
	}
	creq->entries = saved_req->entries;
	creq->data_len = saved_req->data_len;
	kfree(saved_req);

	return err;

}

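/*
 * Virtual-buffer path: copy the user source vectors into one cache-line
 * aligned kernel bounce buffer, run the cipher in place on that buffer, then
 * copy the result back out to the user destination vectors. The destination
 * index (*di) is carried across calls so multi-pass transfers resume where
 * the previous pass stopped.
 */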
1427static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
Mona Hossain087c60b2011-07-20 10:34:57 -07001428 int *di, struct qcedev_handle *handle,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001429 uint8_t *k_align_src)
1430{
1431 int err = 0;
1432 int i = 0;
1433 int dst_i = *di;
1434 struct scatterlist sg_src;
1435 uint32_t byteoffset = 0;
1436 uint8_t *user_src = NULL;
1437 uint8_t *k_align_dst = k_align_src;
1438 struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;
1439
1440
1441 if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
1442 byteoffset = areq->cipher_op_req.byteoffset;
1443
1444 user_src = (void __user *)areq->cipher_op_req.vbuf.src[0].vaddr;
1445 if (user_src && __copy_from_user((k_align_src + byteoffset),
1446 (void __user *)user_src,
1447 areq->cipher_op_req.vbuf.src[0].len))
1448 return -EFAULT;
1449
1450 k_align_src += areq->cipher_op_req.vbuf.src[0].len;
1451
1452 for (i = 1; i < areq->cipher_op_req.entries; i++) {
1453 user_src =
1454 (void __user *)areq->cipher_op_req.vbuf.src[i].vaddr;
1455 if (user_src && __copy_from_user(k_align_src,
1456 (void __user *)user_src,
1457 areq->cipher_op_req.vbuf.src[i].len)) {
1458 return -EFAULT;
1459 }
1460 k_align_src += areq->cipher_op_req.vbuf.src[i].len;
1461 }
1462
1463 /* restore src beginning */
1464 k_align_src = k_align_dst;
1465 areq->cipher_op_req.data_len += byteoffset;
1466
1467 areq->cipher_req.creq.src = (struct scatterlist *) &sg_src;
1468 areq->cipher_req.creq.dst = (struct scatterlist *) &sg_src;
1469
1470 /* In place encryption/decryption */
1471 sg_set_buf(areq->cipher_req.creq.src,
1472 k_align_dst,
1473 areq->cipher_op_req.data_len);
1474 sg_mark_end(areq->cipher_req.creq.src);
1475
1476 areq->cipher_req.creq.nbytes = areq->cipher_op_req.data_len;
1477 areq->cipher_req.creq.info = areq->cipher_op_req.iv;
1478 areq->cipher_op_req.entries = 1;
1479
Mona Hossain087c60b2011-07-20 10:34:57 -07001480 err = submit_req(areq, handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001481
1482 /* copy data to destination buffer*/
1483 creq->data_len -= byteoffset;
1484
1485 while (creq->data_len > 0) {
1486 if (creq->vbuf.dst[dst_i].len <= creq->data_len) {
1487 if (err == 0 && __copy_to_user(
1488 (void __user *)creq->vbuf.dst[dst_i].vaddr,
1489 (k_align_dst + byteoffset),
1490 creq->vbuf.dst[dst_i].len))
1491 return -EFAULT;
1492
1493 k_align_dst += creq->vbuf.dst[dst_i].len +
1494 byteoffset;
1495 creq->data_len -= creq->vbuf.dst[dst_i].len;
1496 dst_i++;
1497 } else {
1498 if (err == 0 && __copy_to_user(
1499 (void __user *)creq->vbuf.dst[dst_i].vaddr,
1500 (k_align_dst + byteoffset),
1501 creq->data_len))
1502 return -EFAULT;
1503
1504 k_align_dst += creq->data_len;
1505 creq->vbuf.dst[dst_i].len -= creq->data_len;
1506 creq->vbuf.dst[dst_i].vaddr += creq->data_len;
1507 creq->data_len = 0;
1508 }
1509 }
1510 *di = dst_i;
1511
1512 return err;
1513};
1514
1515static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq,
Mona Hossain087c60b2011-07-20 10:34:57 -07001516 struct qcedev_handle *handle)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001517{
1518 int err = 0;
1519 int di = 0;
1520 int i = 0;
1521 int j = 0;
1522 int k = 0;
1523 uint32_t byteoffset = 0;
1524 int num_entries = 0;
1525 uint32_t total = 0;
1526 uint32_t len;
1527 uint8_t *k_buf_src = NULL;
1528 uint8_t *k_align_src = NULL;
1529 uint32_t max_data_xfer;
1530 struct qcedev_cipher_op_req *saved_req;
1531 struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;
1532
1533 /* Verify Source Address's */
1534 for (i = 0; i < areq->cipher_op_req.entries; i++)
1535 if (!access_ok(VERIFY_READ,
1536 (void __user *)areq->cipher_op_req.vbuf.src[i].vaddr,
1537 areq->cipher_op_req.vbuf.src[i].len))
1538 return -EFAULT;
1539
1540 /* Verify Destination Address's */
1541 if (areq->cipher_op_req.in_place_op != 1)
1542 for (i = 0; i < areq->cipher_op_req.entries; i++)
1543 if (!access_ok(VERIFY_READ,
1544 (void __user *)areq->cipher_op_req.vbuf.dst[i].vaddr,
1545 areq->cipher_op_req.vbuf.dst[i].len))
1546 return -EFAULT;
1547
1548 if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
1549 byteoffset = areq->cipher_op_req.byteoffset;
1550 k_buf_src = kmalloc(QCE_MAX_OPER_DATA + CACHE_LINE_SIZE * 2,
1551 GFP_KERNEL);
1552 if (k_buf_src == NULL) {
1553 printk(KERN_ERR "%s: Can't Allocate memory: k_buf_src 0x%x\n",
1554 __func__, (uint32_t)k_buf_src);
1555 return -ENOMEM;
1556 }
1557 k_align_src = (uint8_t *) ALIGN(((unsigned int)k_buf_src),
1558 CACHE_LINE_SIZE);
1559 max_data_xfer = QCE_MAX_OPER_DATA - byteoffset;
1560
1561 saved_req = kmalloc(sizeof(struct qcedev_cipher_op_req), GFP_KERNEL);
1562 if (saved_req == NULL) {
1563 printk(KERN_ERR "%s: Can't Allocate memory:saved_req 0x%x\n",
1564 __func__, (uint32_t)saved_req);
1565 kfree(k_buf_src);
1566 return -ENOMEM;
1567
1568 }
1569 memcpy(saved_req, creq, sizeof(struct qcedev_cipher_op_req));
1570
1571 if (areq->cipher_op_req.data_len > max_data_xfer) {
1572 struct qcedev_cipher_op_req req;
1573
1574 /* save the original req structure */
1575 memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));
1576
1577 i = 0;
1578 /* Address 32 KB at a time */
1579 while ((i < req.entries) && (err == 0)) {
1580 if (creq->vbuf.src[i].len > max_data_xfer) {
1581 creq->vbuf.src[0].len = max_data_xfer;
1582 if (i > 0) {
1583 creq->vbuf.src[0].vaddr =
1584 creq->vbuf.src[i].vaddr;
1585 }
1586
1587 creq->data_len = max_data_xfer;
1588 creq->entries = 1;
1589
1590 err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
Mona Hossain087c60b2011-07-20 10:34:57 -07001591 &di, handle, k_align_src);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001592 if (err < 0) {
1593 kfree(k_buf_src);
1594 kfree(saved_req);
1595 return err;
1596 }
1597
1598 creq->vbuf.src[i].len = req.vbuf.src[i].len -
1599 max_data_xfer;
1600 creq->vbuf.src[i].vaddr =
1601 req.vbuf.src[i].vaddr +
1602 max_data_xfer;
1603 req.vbuf.src[i].vaddr =
1604 creq->vbuf.src[i].vaddr;
1605 req.vbuf.src[i].len = creq->vbuf.src[i].len;
1606
1607 } else {
1608 total = areq->cipher_op_req.byteoffset;
1609 for (j = i; j < req.entries; j++) {
1610 num_entries++;
1611 if ((total + creq->vbuf.src[j].len)
1612 >= max_data_xfer) {
1613 creq->vbuf.src[j].len =
1614 max_data_xfer - total;
1615 total = max_data_xfer;
1616 break;
1617 }
1618 total += creq->vbuf.src[j].len;
1619 }
1620
1621 creq->data_len = total;
1622 if (i > 0)
1623 for (k = 0; k < num_entries; k++) {
1624 creq->vbuf.src[k].len =
1625 creq->vbuf.src[i+k].len;
1626 creq->vbuf.src[k].vaddr =
1627 creq->vbuf.src[i+k].vaddr;
1628 }
1629 creq->entries = num_entries;
1630
1631 i = j;
1632 err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
Mona Hossain087c60b2011-07-20 10:34:57 -07001633 &di, handle, k_align_src);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001634 if (err < 0) {
1635 kfree(k_buf_src);
1636 kfree(saved_req);
1637 return err;
1638 }
1639
1640 num_entries = 0;
1641 areq->cipher_op_req.byteoffset = 0;
1642
1643 creq->vbuf.src[i].vaddr = req.vbuf.src[i].vaddr
1644 + creq->vbuf.src[i].len;
1645 creq->vbuf.src[i].len = req.vbuf.src[i].len -
1646 creq->vbuf.src[i].len;
1647
1648 req.vbuf.src[i].vaddr =
1649 creq->vbuf.src[i].vaddr;
1650 req.vbuf.src[i].len = creq->vbuf.src[i].len;
1651
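				/* If the entry was fully consumed, advance to the next one. */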
1652 if (creq->vbuf.src[i].len == 0)
1653 i++;
1654 }
1655
1656 areq->cipher_op_req.byteoffset = 0;
1657 max_data_xfer = QCE_MAX_OPER_DATA;
1658 byteoffset = 0;
1659
1660 } /* end of while ((i < req.entries) && (err == 0)) */
1661 } else
1662		err = qcedev_vbuf_ablk_cipher_max_xfer(areq, &di, handle,
1663							k_align_src);
1664
1665 /* Restore the original req structure */
1666 for (i = 0; i < saved_req->entries; i++) {
1667 creq->vbuf.src[i].len = saved_req->vbuf.src[i].len;
1668 creq->vbuf.src[i].vaddr = saved_req->vbuf.src[i].vaddr;
1669 }
1670 for (len = 0, i = 0; len < saved_req->data_len; i++) {
1671 creq->vbuf.dst[i].len = saved_req->vbuf.dst[i].len;
1672 creq->vbuf.dst[i].vaddr = saved_req->vbuf.dst[i].vaddr;
1673 len += saved_req->vbuf.dst[i].len;
1674 }
1675 creq->entries = saved_req->entries;
1676 creq->data_len = saved_req->data_len;
1677 creq->byteoffset = saved_req->byteoffset;
1678
1679 kfree(saved_req);
1680 kfree(k_buf_src);
1681 return err;
1682
1683}
1684
1685static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
1686 struct qcedev_control *podev)
1687{
1688 if ((req->entries == 0) || (req->data_len == 0))
1689 goto error;
1690 if ((req->alg >= QCEDEV_ALG_LAST) ||
1691 (req->mode >= QCEDEV_AES_DES_MODE_LAST))
1692 goto error;
1693 if (req->alg == QCEDEV_ALG_AES) {
1694 if ((req->mode == QCEDEV_AES_MODE_XTS) &&
1695 (!podev->ce_support.aes_xts))
1696 goto error;
1697 /* if intending to use HW key make sure key fields are set
1698 * correctly and HW key is indeed supported in target
1699 */
1700 if (req->encklen == 0) {
1701 int i;
1702 for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++)
1703 if (req->enckey[i])
1704 goto error;
1705 if ((req->op != QCEDEV_OPER_ENC_NO_KEY) &&
1706 (req->op != QCEDEV_OPER_DEC_NO_KEY))
1707 if (!podev->platform_support.hw_key_support)
1708 goto error;
1709 } else {
1710 if (req->encklen == QCEDEV_AES_KEY_192) {
1711 if (!podev->ce_support.aes_key_192)
1712 goto error;
1713 } else {
1714 /* if not using HW key make sure key
1715 * length is valid
1716 */
1717 if (!((req->encklen == QCEDEV_AES_KEY_128) ||
1718 (req->encklen == QCEDEV_AES_KEY_256)))
1719 goto error;
1720 }
1721 }
1722 }
1723 /* if using a byteoffset, make sure it is CTR mode using vbuf */
1724 if (req->byteoffset) {
1725 if (req->mode != QCEDEV_AES_MODE_CTR)
1726 goto error;
1727 else { /* if using CTR mode make sure not using Pmem */
1728 if (req->use_pmem)
1729 goto error;
1730 }
1731 }
1732 /* if using PMEM with non-zero byteoffset, ensure it is in_place_op */
1733 if (req->use_pmem) {
1734 if (!req->in_place_op)
1735 goto error;
1736 }
1737	/* Ensure ivlen is zero for ECB mode and non-zero otherwise */
1738 if (req->ivlen != 0) {
1739 if ((req->mode == QCEDEV_AES_MODE_ECB) ||
1740 (req->mode == QCEDEV_DES_MODE_ECB))
1741 goto error;
1742 } else {
1743 if ((req->mode != QCEDEV_AES_MODE_ECB) &&
1744 (req->mode != QCEDEV_DES_MODE_ECB))
1745 goto error;
1746 }
1747
1748 return 0;
1749error:
1750 return -EINVAL;
1751
1752}
1753
1754static int qcedev_check_sha_params(struct qcedev_sha_op_req *req,
1755 struct qcedev_control *podev)
1756{
1757 if ((req->alg == QCEDEV_ALG_AES_CMAC) &&
1758 (!podev->ce_support.cmac))
1759 goto sha_error;
1760
1761 if ((req->entries == 0) || (req->data_len == 0))
1762 goto sha_error;
1763
1764 if (req->alg >= QCEDEV_ALG_SHA_ALG_LAST)
1765 goto sha_error;
1766
1767 return 0;
1768sha_error:
1769 return -EINVAL;
1770}
1771
1772static long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
1773{
1774 int err = 0;
1775	struct qcedev_handle *handle;
1776	struct qcedev_control *podev;
1777 struct qcedev_async_req qcedev_areq;
1778 struct qcedev_stat *pstat;
1779
1780	handle = file->private_data;
1781 podev = handle->cntl;
1782 qcedev_areq.handle = handle;
1783	if (podev == NULL || podev->magic != QCEDEV_MAGIC) {
1784 printk(KERN_ERR "%s: invalid handle %p\n",
1785 __func__, podev);
1786 return -ENOENT;
1787 }
1788
1789 /* Verify user arguments. */
1790 if (_IOC_TYPE(cmd) != QCEDEV_IOC_MAGIC)
1791 return -ENOTTY;
1792
1793 init_completion(&qcedev_areq.complete);
1794 pstat = &_qcedev_stat[podev->pdev->id];
1795
1796 switch (cmd) {
1797 case QCEDEV_IOCTL_LOCK_CE:
1798		if (podev->platform_support.ce_shared)
1799 err = qcedev_lock_ce(podev);
1800 else
1801 err = -ENOTTY;
1802		break;
1803 case QCEDEV_IOCTL_UNLOCK_CE:
1804		if (podev->platform_support.ce_shared)
1805 err = qcedev_unlock_ce(podev);
1806 else
1807 err = -ENOTTY;
1808		break;
1809 case QCEDEV_IOCTL_ENC_REQ:
1810 case QCEDEV_IOCTL_DEC_REQ:
1811 if (!access_ok(VERIFY_WRITE, (void __user *)arg,
1812 sizeof(struct qcedev_cipher_op_req)))
1813 return -EFAULT;
1814
1815 if (__copy_from_user(&qcedev_areq.cipher_op_req,
1816 (void __user *)arg,
1817 sizeof(struct qcedev_cipher_op_req)))
1818 return -EFAULT;
1819 qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_CIPHER;
1820
1821 if (qcedev_check_cipher_params(&qcedev_areq.cipher_op_req,
1822 podev))
1823 return -EINVAL;
1824
1825 if (qcedev_areq.cipher_op_req.use_pmem == QCEDEV_USE_PMEM)
1826			err = qcedev_pmem_ablk_cipher(&qcedev_areq, handle);
1827		else
1828			err = qcedev_vbuf_ablk_cipher(&qcedev_areq, handle);
1829		if (err)
1830 return err;
1831 if (__copy_to_user((void __user *)arg,
1832 &qcedev_areq.cipher_op_req,
1833 sizeof(struct qcedev_cipher_op_req)))
1834 return -EFAULT;
1835 break;
1836
1837 case QCEDEV_IOCTL_SHA_INIT_REQ:
1838
1839 if (!access_ok(VERIFY_WRITE, (void __user *)arg,
1840 sizeof(struct qcedev_sha_op_req)))
1841 return -EFAULT;
1842
1843 if (__copy_from_user(&qcedev_areq.sha_op_req,
1844 (void __user *)arg,
1845 sizeof(struct qcedev_sha_op_req)))
1846 return -EFAULT;
1847 if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
1848 return -EINVAL;
1849 qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
1850		err = qcedev_hash_init(&qcedev_areq, handle);
1851		if (err)
1852 return err;
1853 if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
1854 sizeof(struct qcedev_sha_op_req)))
1855 return -EFAULT;
1856 break;
1857 case QCEDEV_IOCTL_GET_CMAC_REQ:
1858 if (!podev->ce_support.cmac)
1859 return -ENOTTY;
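		/* fall through: CMAC requests share the SHA_UPDATE handling below */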
1860 case QCEDEV_IOCTL_SHA_UPDATE_REQ:
1861 if (!access_ok(VERIFY_WRITE, (void __user *)arg,
1862 sizeof(struct qcedev_sha_op_req)))
1863 return -EFAULT;
1864
1865 if (__copy_from_user(&qcedev_areq.sha_op_req,
1866 (void __user *)arg,
1867 sizeof(struct qcedev_sha_op_req)))
1868 return -EFAULT;
1869 if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
1870 return -EINVAL;
1871 qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
1872
1873 if (qcedev_areq.sha_op_req.alg == QCEDEV_ALG_AES_CMAC) {
1874			err = qcedev_hash_cmac(&qcedev_areq, handle);
1875			if (err)
1876 return err;
1877 } else {
1878			err = qcedev_hash_update(&qcedev_areq, handle);
1879			if (err)
1880 return err;
1881 }
1882
1883 memcpy(&qcedev_areq.sha_op_req.digest[0],
1884				&handle->sha_ctxt.digest[0],
1885				handle->sha_ctxt.diglen);
1886		if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
1887 sizeof(struct qcedev_sha_op_req)))
1888 return -EFAULT;
1889 break;
1890
1891 case QCEDEV_IOCTL_SHA_FINAL_REQ:
1892
1893 if (!access_ok(VERIFY_WRITE, (void __user *)arg,
1894 sizeof(struct qcedev_sha_op_req)))
1895 return -EFAULT;
1896
1897 if (__copy_from_user(&qcedev_areq.sha_op_req,
1898 (void __user *)arg,
1899 sizeof(struct qcedev_sha_op_req)))
1900 return -EFAULT;
1901 if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
1902 return -EINVAL;
1903 qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
1904		err = qcedev_hash_final(&qcedev_areq, handle);
1905		if (err)
1906			return err;
1907		qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen;
1908		memcpy(&qcedev_areq.sha_op_req.digest[0],
1909				&handle->sha_ctxt.digest[0],
1910				handle->sha_ctxt.diglen);
1911		if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
1912 sizeof(struct qcedev_sha_op_req)))
1913 return -EFAULT;
1914 break;
1915
1916 case QCEDEV_IOCTL_GET_SHA_REQ:
1917
1918 if (!access_ok(VERIFY_WRITE, (void __user *)arg,
1919 sizeof(struct qcedev_sha_op_req)))
1920 return -EFAULT;
1921
1922 if (__copy_from_user(&qcedev_areq.sha_op_req,
1923 (void __user *)arg,
1924 sizeof(struct qcedev_sha_op_req)))
1925 return -EFAULT;
1926 if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
1927 return -EINVAL;
1928 qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
1929		err = qcedev_hash_init(&qcedev_areq, handle);
		if (err)
			return err;
1930		err = qcedev_hash_update(&qcedev_areq, handle);
1931		if (err)
1932			return err;
1933		err = qcedev_hash_final(&qcedev_areq, handle);
1934		if (err)
1935			return err;
1936		qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen;
1937		memcpy(&qcedev_areq.sha_op_req.digest[0],
1938				&handle->sha_ctxt.digest[0],
1939				handle->sha_ctxt.diglen);
1940		if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
1941 sizeof(struct qcedev_sha_op_req)))
1942 return -EFAULT;
1943 break;
1944
1945 default:
1946 return -ENOTTY;
1947 }
1948
1949 return err;
1950}
1951
1952static int qcedev_probe(struct platform_device *pdev)
1953{
1954 void *handle = NULL;
1955 int rc = 0;
1956 struct qcedev_control *podev;
1957 struct msm_ce_hw_support *platform_support;
1958
1959 if (pdev->id >= MAX_QCE_DEVICE) {
1960 printk(KERN_ERR "%s: device id %d exceeds allowed %d\n",
1961 __func__, pdev->id, MAX_QCE_DEVICE);
1962 return -ENOENT;
1963 }
1964 podev = &qce_dev[pdev->id];
1965
1966 platform_support = (struct msm_ce_hw_support *)pdev->dev.platform_data;
1967 podev->platform_support.ce_shared = platform_support->ce_shared;
1968 podev->platform_support.shared_ce_resource =
1969 platform_support->shared_ce_resource;
1970 podev->platform_support.hw_key_support =
1971 platform_support->hw_key_support;
1972	podev->ce_lock_count = 0;
1973	INIT_LIST_HEAD(&podev->ready_commands);
1974 podev->active_command = NULL;
1975
1976 spin_lock_init(&podev->lock);
1977
1978 tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev);
1979
1980 /* open qce */
1981 handle = qce_open(pdev, &rc);
1982 if (handle == NULL) {
1983 platform_set_drvdata(pdev, NULL);
1984 return rc;
1985 }
1986
1987 podev->qce = handle;
1988 podev->pdev = pdev;
1989 platform_set_drvdata(pdev, podev);
1990 qce_hw_support(podev->qce, &podev->ce_support);
1991 rc = misc_register(&podev->miscdevice);
1992
1993 if (rc >= 0)
1994 return 0;
1995
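	/* misc_register failed: tear down the qce handle and driver data */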
1996 if (handle)
1997 qce_close(handle);
1998 platform_set_drvdata(pdev, NULL);
1999 podev->qce = NULL;
2000 podev->pdev = NULL;
2001 return rc;
2002};
2003
2004static int qcedev_remove(struct platform_device *pdev)
2005{
2006 struct qcedev_control *podev;
2007
2008 podev = platform_get_drvdata(pdev);
2009 if (!podev)
2010 return 0;
2011 if (podev->qce)
2012 qce_close(podev->qce);
2013
2014 if (podev->miscdevice.minor != MISC_DYNAMIC_MINOR)
2015 misc_deregister(&podev->miscdevice);
2016 tasklet_kill(&podev->done_tasklet);
2017 return 0;
2018};
2019
2020static struct platform_driver qcedev_plat_driver = {
2021 .probe = qcedev_probe,
2022 .remove = qcedev_remove,
2023 .driver = {
2024 .name = "qce",
2025 .owner = THIS_MODULE,
2026 },
2027};
2028
2029static int _disp_stats(int id)
2030{
2031 struct qcedev_stat *pstat;
2032 int len = 0;
2033
2034 pstat = &_qcedev_stat[id];
2035 len = snprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
2036 "\nQualcomm QCE dev driver %d Statistics:\n",
2037 id + 1);
2038
2039 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
2040 " Encryption operation success : %d\n",
2041 pstat->qcedev_enc_success);
2042 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
2043 " Encryption operation fail : %d\n",
2044 pstat->qcedev_enc_fail);
2045 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
2046 " Decryption operation success : %d\n",
2047 pstat->qcedev_dec_success);
2048
2049 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
2050 " Encryption operation fail : %d\n",
2051 pstat->qcedev_dec_fail);
2052
2053 return len;
2054}
2055
2056static int _debug_stats_open(struct inode *inode, struct file *file)
2057{
2058 file->private_data = inode->i_private;
2059 return 0;
2060}
2061
2062static ssize_t _debug_stats_read(struct file *file, char __user *buf,
2063 size_t count, loff_t *ppos)
2064{
2065 int rc = -EINVAL;
2066 int qcedev = *((int *) file->private_data);
2067 int len;
2068
2069 len = _disp_stats(qcedev);
2070
2071	rc = simple_read_from_buffer((void __user *) buf, count,
2072				ppos, (void *) _debug_read_buf, len);
2073
2074 return rc;
2075}
2076
2077static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
2078 size_t count, loff_t *ppos)
2079{
2080
2081 int qcedev = *((int *) file->private_data);
2082
2083 memset((char *)&_qcedev_stat[qcedev], 0, sizeof(struct qcedev_stat));
2084 return count;
2085};
2086
2087static const struct file_operations _debug_stats_ops = {
2088 .open = _debug_stats_open,
2089 .read = _debug_stats_read,
2090 .write = _debug_stats_write,
2091};
2092
2093static int _qcedev_debug_init(void)
2094{
2095 int rc;
2096 char name[DEBUG_MAX_FNAME];
2097 int i;
2098 struct dentry *dent;
2099
2100 _debug_dent = debugfs_create_dir("qcedev", NULL);
2101 if (IS_ERR(_debug_dent)) {
2102 pr_err("qcedev debugfs_create_dir fail, error %ld\n",
2103 PTR_ERR(_debug_dent));
2104 return PTR_ERR(_debug_dent);
2105 }
2106
2107 for (i = 0; i < MAX_QCE_DEVICE; i++) {
2108 snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", i+1);
2109 _debug_qcedev[i] = i;
2110 dent = debugfs_create_file(name, 0644, _debug_dent,
2111 &_debug_qcedev[i], &_debug_stats_ops);
2112 if (dent == NULL) {
2113 pr_err("qcedev debugfs_create_file fail, error %ld\n",
2114 PTR_ERR(dent));
2115 rc = PTR_ERR(dent);
2116 goto err;
2117 }
2118 }
2119 return 0;
2120err:
2121 debugfs_remove_recursive(_debug_dent);
2122 return rc;
2123}
2124
2125static int qcedev_init(void)
2126{
2127 int rc;
2128
2129 rc = _qcedev_debug_init();
2130 if (rc)
2131 return rc;
2132 return platform_driver_register(&qcedev_plat_driver);
2133}
2134
2135static void qcedev_exit(void)
2136{
2137 debugfs_remove_recursive(_debug_dent);
2138 platform_driver_unregister(&qcedev_plat_driver);
2139}
2140
2141MODULE_LICENSE("GPL v2");
2142MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
2143MODULE_DESCRIPTION("Qualcomm DEV Crypto driver");
2144MODULE_VERSION("1.22");
2145
2146module_init(qcedev_init);
2147module_exit(qcedev_exit);