/* Qualcomm CE device driver.
 *
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/mman.h>
#include <linux/android_pmem.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/hash.h>
#include <linux/platform_data/qcom_crypto_device.h>
#include <mach/scm.h>
#include "inc/qcedev.h"
#include "inc/qce.h"


#define CACHE_LINE_SIZE 32
#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE

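/* standard initialization vector for SHA-1, source: FIPS 180-2 */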
static uint8_t _std_init_vector_sha1_uint8[] = {
	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
	0xC3, 0xD2, 0xE1, 0xF0
};

/* standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint8_t _std_init_vector_sha256_uint8[] = {
	0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
	0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
	0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
};

enum qcedev_crypto_oper_type {
	QCEDEV_CRYPTO_OPER_CIPHER = 0,
	QCEDEV_CRYPTO_OPER_SHA = 1,
	QCEDEV_CRYPTO_OPER_LAST
};

struct qcedev_control;

struct qcedev_cipher_req {
	struct ablkcipher_request creq;
	void *cookie;
};

struct qcedev_sha_req {
	struct ahash_request sreq;
	struct qcedev_sha_ctxt *sha_ctxt;
	void *cookie;
};

struct qcedev_async_req {
	struct list_head list;
	struct completion complete;
	enum qcedev_crypto_oper_type op_type;
	union {
		struct qcedev_cipher_op_req cipher_op_req;
		struct qcedev_sha_op_req sha_op_req;
	};
	union {
		struct qcedev_cipher_req cipher_req;
		struct qcedev_sha_req sha_req;
	};
	struct qcedev_control *podev;
	int err;
};

static DEFINE_MUTEX(send_cmd_lock);

/**********************************************************************
 * Register ourselves as a misc device to be able to access the dev driver
 * from userspace. */


#define QCEDEV_DEV	"qcedev"

struct qcedev_control {

	/* CE features supported by platform */
	struct msm_ce_hw_support platform_support;

	uint32_t ce_lock_count;
	/* CE features/algorithms supported by HW engine */
	struct ce_hw_support ce_support;

	/* misc device */
	struct miscdevice miscdevice;

	/* qce handle */
	void *qce;

	/* platform device */
	struct platform_device *pdev;

	unsigned magic;

	struct list_head ready_commands;
	struct qcedev_async_req *active_command;
	spinlock_t lock;
	struct tasklet_struct done_tasklet;
};

/*-------------------------------------------------------------------------
 * Resource Locking Service
 * ------------------------------------------------------------------------*/
#define QCEDEV_CMD_ID		1
#define QCEDEV_CE_LOCK_CMD	1
#define QCEDEV_CE_UNLOCK_CMD	0
#define NUM_RETRY		1000
#define CE_BUSY			55

static int qcedev_scm_cmd(int resource, int cmd, int *response)
{
#ifdef CONFIG_MSM_SCM

	struct {
		int resource;
		int cmd;
	} cmd_buf;

	cmd_buf.resource = resource;
	cmd_buf.cmd = cmd;

	return scm_call(SCM_SVC_TZ, QCEDEV_CMD_ID, &cmd_buf,
		sizeof(cmd_buf), response, sizeof(*response));

#else
	return 0;
#endif
}

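/*
 * On targets where the crypto engine is shared with secure (TZ) code,
 * lock/unlock requests are routed through the secure channel manager
 * (scm_call) so the two environments serialize access to the CE. When
 * CONFIG_MSM_SCM is not set the engine is assumed to be exclusively
 * owned and the call is a no-op.
 */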
static int qcedev_unlock_ce(struct qcedev_control *podev)
{
	int ret = 0;

	mutex_lock(&send_cmd_lock);
	if (podev->ce_lock_count == 1) {
		int response = 0;

		if (qcedev_scm_cmd(podev->platform_support.shared_ce_resource,
				QCEDEV_CE_UNLOCK_CMD, &response)) {
			pr_err("Failed to release CE lock\n");
			ret = -EIO;
		}
	}
	if (ret == 0) {
		if (podev->ce_lock_count)
			podev->ce_lock_count--;
		else {
			/* We should never be here */
			ret = -EIO;
			pr_err("CE hardware is already unlocked\n");
		}
	}
	mutex_unlock(&send_cmd_lock);

	return ret;
}

static int qcedev_lock_ce(struct qcedev_control *podev)
{
	int ret = 0;

	mutex_lock(&send_cmd_lock);
	if (podev->ce_lock_count == 0) {
		int response = -CE_BUSY;
		int i = 0;

		do {
			if (qcedev_scm_cmd(
				podev->platform_support.shared_ce_resource,
				QCEDEV_CE_LOCK_CMD, &response)) {
				response = -EINVAL;
				break;
			}
		} while ((response == -CE_BUSY) && (i++ < NUM_RETRY));

		if ((response == -CE_BUSY) && (i >= NUM_RETRY)) {
			ret = -EUSERS;
		} else {
			if (response < 0)
				ret = -EINVAL;
		}
	}
	if (ret == 0)
		podev->ce_lock_count++;
	mutex_unlock(&send_cmd_lock);
	return ret;
}

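/*
 * The lock/unlock pair is reference counted: only the first lock and the
 * last unlock actually issue an SCM command. Locking retries up to
 * NUM_RETRY times while TZ reports the engine busy (-CE_BUSY) and gives
 * up with -EUSERS once the retries are exhausted.
 */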
#define QCEDEV_MAGIC 0x56434544 /* "qced" */

static long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
static int qcedev_open(struct inode *inode, struct file *file);
static int qcedev_release(struct inode *inode, struct file *file);
static int start_cipher_req(struct qcedev_control *podev);
static int start_sha_req(struct qcedev_control *podev);

static const struct file_operations qcedev_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = qcedev_ioctl,
	.open = qcedev_open,
	.release = qcedev_release,
};

static struct qcedev_control qce_dev[] = {
	{
		.miscdevice = {
			.minor = MISC_DYNAMIC_MINOR,
			.name = "qce",
			.fops = &qcedev_fops,
		},
		.magic = QCEDEV_MAGIC,
	},
};

#define MAX_QCE_DEVICE ARRAY_SIZE(qce_dev)
#define DEBUG_MAX_FNAME  16
#define DEBUG_MAX_RW_BUF 1024

struct qcedev_stat {
	u32 qcedev_dec_success;
	u32 qcedev_dec_fail;
	u32 qcedev_enc_success;
	u32 qcedev_enc_fail;
	u32 qcedev_sha_success;
	u32 qcedev_sha_fail;
};

static struct qcedev_stat _qcedev_stat[MAX_QCE_DEVICE];
static struct dentry *_debug_dent;
static char _debug_read_buf[DEBUG_MAX_RW_BUF];
static int _debug_qcedev[MAX_QCE_DEVICE];

static struct qcedev_control *qcedev_minor_to_control(unsigned n)
{
	int i;

	for (i = 0; i < MAX_QCE_DEVICE; i++) {
		if (qce_dev[i].miscdevice.minor == n)
			return &qce_dev[i];
	}
	return NULL;
}

static int qcedev_open(struct inode *inode, struct file *file)
{
	struct qcedev_control *podev;

	podev = qcedev_minor_to_control(MINOR(inode->i_rdev));
	if (podev == NULL) {
		printk(KERN_ERR "%s: no such device %d\n", __func__,
			MINOR(inode->i_rdev));
		return -ENOENT;
	}

	file->private_data = podev;

	return 0;
}

static int qcedev_release(struct inode *inode, struct file *file)
{
	struct qcedev_control *podev;

	podev = file->private_data;

	if (podev != NULL && podev->magic != QCEDEV_MAGIC) {
		printk(KERN_ERR "%s: invalid handle %p\n",
			__func__, podev);
	}

	file->private_data = NULL;

	return 0;
}

static void req_done(unsigned long data)
{
	struct qcedev_control *podev = (struct qcedev_control *)data;
	struct qcedev_async_req *areq;
	unsigned long flags = 0;
	struct qcedev_async_req *new_req = NULL;
	int ret = 0;

	spin_lock_irqsave(&podev->lock, flags);
	areq = podev->active_command;
	podev->active_command = NULL;

again:
	if (!list_empty(&podev->ready_commands)) {
		new_req = container_of(podev->ready_commands.next,
					struct qcedev_async_req, list);
		list_del(&new_req->list);
		podev->active_command = new_req;
		new_req->err = 0;
		if (new_req->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
			ret = start_cipher_req(podev);
		else
			ret = start_sha_req(podev);
	}

	spin_unlock_irqrestore(&podev->lock, flags);

	if (areq)
		complete(&areq->complete);

	if (new_req && ret) {
		complete(&new_req->complete);
		spin_lock_irqsave(&podev->lock, flags);
		podev->active_command = NULL;
		areq = NULL;
		ret = 0;
		new_req = NULL;
		goto again;
	}

	return;
}

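/*
 * Bottom half run from the done_tasklet: completes the request that just
 * finished and immediately starts the next queued one. If starting the
 * new request fails, it is completed right away (with ->err set by the
 * start routine) and the queue is drained again.
 */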
static void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
	unsigned char *authdata, int ret)
{
	struct qcedev_sha_req *areq;
	struct qcedev_control *pdev;
	uint32_t *auth32 = (uint32_t *)authdata;

	areq = (struct qcedev_sha_req *) cookie;
	pdev = (struct qcedev_control *) areq->cookie;

	if (digest)
		memcpy(&areq->sha_ctxt->digest[0], digest, 32);

	if (authdata) {
		areq->sha_ctxt->auth_data[0] = auth32[0];
		areq->sha_ctxt->auth_data[1] = auth32[1];
		areq->sha_ctxt->auth_data[2] = auth32[2];
		areq->sha_ctxt->auth_data[3] = auth32[3];
	}

	tasklet_schedule(&pdev->done_tasklet);
}


static void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
	unsigned char *iv, int ret)
{
	struct qcedev_cipher_req *areq;
	struct qcedev_control *pdev;
	struct qcedev_async_req *qcedev_areq;

	areq = (struct qcedev_cipher_req *) cookie;
	pdev = (struct qcedev_control *) areq->cookie;
	qcedev_areq = pdev->active_command;

	if (iv)
		memcpy(&qcedev_areq->cipher_op_req.iv[0], iv,
			qcedev_areq->cipher_op_req.ivlen);
	tasklet_schedule(&pdev->done_tasklet);
}

static int start_cipher_req(struct qcedev_control *podev)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_req creq;
	int ret = 0;

	/* start the command on the podev->active_command */
	qcedev_areq = podev->active_command;
	qcedev_areq->podev = podev;

	qcedev_areq->cipher_req.cookie = qcedev_areq->podev;
	creq.use_pmem = qcedev_areq->cipher_op_req.use_pmem;
	if (qcedev_areq->cipher_op_req.use_pmem == QCEDEV_USE_PMEM)
		creq.pmem = &qcedev_areq->cipher_op_req.pmem;
	else
		creq.pmem = NULL;

	switch (qcedev_areq->cipher_op_req.alg) {
	case QCEDEV_ALG_DES:
		creq.alg = CIPHER_ALG_DES;
		break;
	case QCEDEV_ALG_3DES:
		creq.alg = CIPHER_ALG_3DES;
		break;
	case QCEDEV_ALG_AES:
		creq.alg = CIPHER_ALG_AES;
		break;
	default:
		break;
	}

	switch (qcedev_areq->cipher_op_req.mode) {
	case QCEDEV_AES_MODE_CBC:
	case QCEDEV_DES_MODE_CBC:
		creq.mode = QCE_MODE_CBC;
		break;
	case QCEDEV_AES_MODE_ECB:
	case QCEDEV_DES_MODE_ECB:
		creq.mode = QCE_MODE_ECB;
		break;
	case QCEDEV_AES_MODE_CTR:
		creq.mode = QCE_MODE_CTR;
		break;
	case QCEDEV_AES_MODE_XTS:
		creq.mode = QCE_MODE_XTS;
		break;
	default:
		break;
	}

	if ((creq.alg == CIPHER_ALG_AES) &&
		(creq.mode == QCE_MODE_CTR)) {
		creq.dir = QCE_ENCRYPT;
	} else {
		if (QCEDEV_OPER_ENC == qcedev_areq->cipher_op_req.op)
			creq.dir = QCE_ENCRYPT;
		else
			creq.dir = QCE_DECRYPT;
	}

	creq.iv = &qcedev_areq->cipher_op_req.iv[0];
	creq.ivsize = qcedev_areq->cipher_op_req.ivlen;

	creq.enckey = &qcedev_areq->cipher_op_req.enckey[0];
	creq.encklen = qcedev_areq->cipher_op_req.encklen;

	creq.cryptlen = qcedev_areq->cipher_op_req.data_len;

	if (qcedev_areq->cipher_op_req.encklen == 0) {
		if ((qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC_NO_KEY)
			|| (qcedev_areq->cipher_op_req.op ==
				QCEDEV_OPER_DEC_NO_KEY))
			creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
		else {
			int i;

			for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
				if (qcedev_areq->cipher_op_req.enckey[i] != 0)
					break;
			}

			if ((podev->platform_support.hw_key_support == 1) &&
				(i == QCEDEV_MAX_KEY_SIZE))
				creq.op = QCE_REQ_ABLK_CIPHER;
			else {
				ret = -EINVAL;
				goto unsupported;
			}
		}
	} else {
		creq.op = QCE_REQ_ABLK_CIPHER;
	}

	creq.qce_cb = qcedev_cipher_req_cb;
	creq.areq = (void *)&qcedev_areq->cipher_req;

	ret = qce_ablk_cipher_req(podev->qce, &creq);
unsupported:
	if (ret)
		qcedev_areq->err = -ENXIO;
	else
		qcedev_areq->err = 0;
	return ret;
}

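/*
 * A zero-length key is legal in two cases only: an explicit *_NO_KEY
 * operation, or an all-zero key buffer on targets that advertise
 * hw_key_support, in which case the engine falls back to its hardware
 * key. Anything else is rejected with -EINVAL.
 */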
static int start_sha_req(struct qcedev_control *podev)
{
	struct qcedev_async_req *qcedev_areq;
	struct qce_sha_req sreq;
	int ret = 0;

	/* start the command on the podev->active_command */
	qcedev_areq = podev->active_command;
	qcedev_areq->podev = podev;

	switch (qcedev_areq->sha_op_req.alg) {
	case QCEDEV_ALG_SHA1:
		sreq.alg = QCE_HASH_SHA1;
		break;
	case QCEDEV_ALG_SHA256:
		sreq.alg = QCE_HASH_SHA256;
		break;
	case QCEDEV_ALG_SHA1_HMAC:
		if (podev->ce_support.sha_hmac) {
			sreq.alg = QCE_HASH_SHA1_HMAC;
			sreq.authkey =
				&qcedev_areq->sha_op_req.ctxt.authkey[0];
		} else {
			sreq.alg = QCE_HASH_SHA1;
			sreq.authkey = NULL;
		}
		break;
	case QCEDEV_ALG_SHA256_HMAC:
		if (podev->ce_support.sha_hmac) {
			sreq.alg = QCE_HASH_SHA256_HMAC;
			sreq.authkey =
				&qcedev_areq->sha_op_req.ctxt.authkey[0];
		} else {
			sreq.alg = QCE_HASH_SHA256;
			sreq.authkey = NULL;
		}
		break;
	case QCEDEV_ALG_AES_CMAC:
		sreq.alg = QCE_HASH_AES_CMAC;
		sreq.authkey = &qcedev_areq->sha_op_req.ctxt.authkey[0];
		sreq.authklen = qcedev_areq->sha_op_req.authklen;
		break;
	default:
		break;
	}

	qcedev_areq->sha_req.cookie = podev;

	sreq.qce_cb = qcedev_sha_req_cb;
	if (qcedev_areq->sha_op_req.alg != QCEDEV_ALG_AES_CMAC) {
		sreq.auth_data[0] = qcedev_areq->sha_op_req.ctxt.auth_data[0];
		sreq.auth_data[1] = qcedev_areq->sha_op_req.ctxt.auth_data[1];
		sreq.auth_data[2] = qcedev_areq->sha_op_req.ctxt.auth_data[2];
		sreq.auth_data[3] = qcedev_areq->sha_op_req.ctxt.auth_data[3];
		sreq.digest = &qcedev_areq->sha_op_req.ctxt.digest[0];
		sreq.first_blk = qcedev_areq->sha_op_req.ctxt.first_blk;
		sreq.last_blk = qcedev_areq->sha_op_req.ctxt.last_blk;
	}
	sreq.size = qcedev_areq->sha_req.sreq.nbytes;
	sreq.src = qcedev_areq->sha_req.sreq.src;
	sreq.areq = (void *)&qcedev_areq->sha_req;
	qcedev_areq->sha_req.sha_ctxt =
		(struct qcedev_sha_ctxt *)(&qcedev_areq->sha_op_req.ctxt);

	ret = qce_process_sha_req(podev->qce, &sreq);

	if (ret)
		qcedev_areq->err = -ENXIO;
	else
		qcedev_areq->err = 0;
	return ret;
}

static int submit_req(struct qcedev_async_req *qcedev_areq,
	struct qcedev_control *podev)
{
	unsigned long flags = 0;
	int ret = 0;
	struct qcedev_stat *pstat;

	qcedev_areq->err = 0;

	if (podev->platform_support.ce_shared) {
		ret = qcedev_lock_ce(podev);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&podev->lock, flags);

	if (podev->active_command == NULL) {
		podev->active_command = qcedev_areq;
		if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
			ret = start_cipher_req(podev);
		else
			ret = start_sha_req(podev);
	} else {
		list_add_tail(&qcedev_areq->list, &podev->ready_commands);
	}

	if (ret != 0)
		podev->active_command = NULL;

	spin_unlock_irqrestore(&podev->lock, flags);

	if (ret == 0)
		wait_for_completion(&qcedev_areq->complete);

	if (podev->platform_support.ce_shared)
		ret = qcedev_unlock_ce(podev);

	if (ret)
		qcedev_areq->err = -EIO;

	pstat = &_qcedev_stat[podev->pdev->id];
	if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) {
		switch (qcedev_areq->cipher_op_req.op) {
		case QCEDEV_OPER_DEC:
			if (qcedev_areq->err)
				pstat->qcedev_dec_fail++;
			else
				pstat->qcedev_dec_success++;
			break;
		case QCEDEV_OPER_ENC:
			if (qcedev_areq->err)
				pstat->qcedev_enc_fail++;
			else
				pstat->qcedev_enc_success++;
			break;
		default:
			break;
		}
	} else {
		if (qcedev_areq->err)
			pstat->qcedev_sha_fail++;
		else
			pstat->qcedev_sha_success++;
	}

	return qcedev_areq->err;
}

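/*
 * Synchronous front end over the asynchronous engine: the caller's
 * request either becomes the active command immediately or is queued on
 * ready_commands, and the caller sleeps on the completion until the
 * done_tasklet signals it. One command is in flight per device at a time.
 */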
static int qcedev_sha_init(struct qcedev_async_req *areq,
	struct qcedev_control *podev)
{
	struct qcedev_sha_ctxt *sha_ctxt = &areq->sha_op_req.ctxt;

	memset(sha_ctxt, 0, sizeof(struct qcedev_sha_ctxt));
	sha_ctxt->first_blk = 1;

	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
		(areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)) {
		memcpy(&sha_ctxt->digest[0],
			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
		sha_ctxt->diglen = SHA1_DIGEST_SIZE;
	} else {
		if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA256) ||
			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)) {
			memcpy(&sha_ctxt->digest[0],
				&_std_init_vector_sha256_uint8[0],
				SHA256_DIGEST_SIZE);
			sha_ctxt->diglen = SHA256_DIGEST_SIZE;
		}
	}
	return 0;
}


static int qcedev_sha_update_max_xfer(struct qcedev_async_req *qcedev_areq,
	struct qcedev_control *podev)
{
	int err = 0;
	int i = 0;
	struct scatterlist sg_src[2];
	uint32_t total;

	uint8_t *user_src = NULL;
	uint8_t *k_src = NULL;
	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;

	uint32_t sha_pad_len = 0;
	uint32_t trailing_buf_len = 0;
	uint32_t t_buf = qcedev_areq->sha_op_req.ctxt.trailing_buf_len;
	uint32_t sha_block_size;

	total = qcedev_areq->sha_op_req.data_len + t_buf;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1)
		sha_block_size = SHA1_BLOCK_SIZE;
	else
		sha_block_size = SHA256_BLOCK_SIZE;

	if (total <= sha_block_size) {
		uint32_t len = qcedev_areq->sha_op_req.data_len;

		i = 0;

		k_src = &qcedev_areq->sha_op_req.ctxt.trailing_buf[t_buf];

		/* Copy data from user src(s) */
		while (len > 0) {
			user_src =
			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
			if (user_src && __copy_from_user(k_src,
				(void __user *)user_src,
				qcedev_areq->sha_op_req.data[i].len))
				return -EFAULT;

			len -= qcedev_areq->sha_op_req.data[i].len;
			k_src += qcedev_areq->sha_op_req.data[i].len;
			i++;
		}
		qcedev_areq->sha_op_req.ctxt.trailing_buf_len = total;

		return 0;
	}


	k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2, GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;

	k_align_src = (uint8_t *) ALIGN(((unsigned int)k_buf_src),
							CACHE_LINE_SIZE);
	k_src = k_align_src;

	/* check for trailing buffer from previous updates and append it */
	if (t_buf > 0) {
		memcpy(k_src, &qcedev_areq->sha_op_req.ctxt.trailing_buf[0],
								t_buf);
		k_src += t_buf;
	}

	/* Copy data from user src(s) */
	user_src = (void __user *)qcedev_areq->sha_op_req.data[0].vaddr;
	if (user_src && __copy_from_user(k_src,
				(void __user *)user_src,
				qcedev_areq->sha_op_req.data[0].len)) {
		kfree(k_buf_src);
		return -EFAULT;
	}
	k_src += qcedev_areq->sha_op_req.data[0].len;
	for (i = 1; i < qcedev_areq->sha_op_req.entries; i++) {
		user_src = (void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
		if (user_src && __copy_from_user(k_src,
					(void __user *)user_src,
					qcedev_areq->sha_op_req.data[i].len)) {
			kfree(k_buf_src);
			return -EFAULT;
		}
		k_src += qcedev_areq->sha_op_req.data[i].len;
	}

	/* get new trailing buffer */
	sha_pad_len = ALIGN(total, CE_SHA_BLOCK_SIZE) - total;
	trailing_buf_len = CE_SHA_BLOCK_SIZE - sha_pad_len;

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src[0];
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src,
						total - trailing_buf_len);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = total - trailing_buf_len;

	/* update sha_ctxt trailing buf content to new trailing buf */
	if (trailing_buf_len > 0) {
		memset(&qcedev_areq->sha_op_req.ctxt.trailing_buf[0], 0, 64);
		memcpy(&qcedev_areq->sha_op_req.ctxt.trailing_buf[0],
			(k_src - trailing_buf_len),
			trailing_buf_len);
	}
	qcedev_areq->sha_op_req.ctxt.trailing_buf_len = trailing_buf_len;

	err = submit_req(qcedev_areq, podev);

	qcedev_areq->sha_op_req.ctxt.last_blk = 0;
	qcedev_areq->sha_op_req.ctxt.first_blk = 0;

	kfree(k_buf_src);
	return err;
}

static int qcedev_sha_update(struct qcedev_async_req *qcedev_areq,
	struct qcedev_control *podev)
{
	int err = 0;
	int i = 0;
	int j = 0;
	int k = 0;
	int num_entries = 0;
	uint32_t total = 0;

	/* verify address src(s) */
	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++)
		if (!access_ok(VERIFY_READ,
			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr,
			qcedev_areq->sha_op_req.data[i].len))
			return -EFAULT;

	if (qcedev_areq->sha_op_req.data_len > QCE_MAX_OPER_DATA) {

		struct qcedev_sha_op_req *saved_req;
		struct qcedev_sha_op_req req;
		struct qcedev_sha_op_req *sreq = &qcedev_areq->sha_op_req;

		/* save the original req structure */
		saved_req =
			kmalloc(sizeof(struct qcedev_sha_op_req), GFP_KERNEL);
		if (saved_req == NULL) {
			printk(KERN_ERR "%s:Can't Allocate mem:saved_req %x\n",
				__func__, (uint32_t)saved_req);
			return -ENOMEM;
		}
		memcpy(&req, sreq, sizeof(struct qcedev_sha_op_req));
		memcpy(saved_req, sreq, sizeof(struct qcedev_sha_op_req));

		i = 0;
		/* Address 32 KB at a time */
		while ((i < req.entries) && (err == 0)) {
			if (sreq->data[i].len > QCE_MAX_OPER_DATA) {
				sreq->data[0].len = QCE_MAX_OPER_DATA;
				if (i > 0) {
					sreq->data[0].vaddr =
							sreq->data[i].vaddr;
				}

				sreq->data_len = QCE_MAX_OPER_DATA;
				sreq->entries = 1;

				err = qcedev_sha_update_max_xfer(qcedev_areq,
									podev);

				sreq->data[i].len = req.data[i].len -
							QCE_MAX_OPER_DATA;
				sreq->data[i].vaddr = req.data[i].vaddr +
							QCE_MAX_OPER_DATA;
				req.data[i].vaddr = sreq->data[i].vaddr;
				req.data[i].len = sreq->data[i].len;
			} else {
				total = 0;
				for (j = i; j < req.entries; j++) {
					num_entries++;
					if ((total + sreq->data[j].len) >=
							QCE_MAX_OPER_DATA) {
						sreq->data[j].len =
						(QCE_MAX_OPER_DATA - total);
						total = QCE_MAX_OPER_DATA;
						break;
					}
					total += sreq->data[j].len;
				}

				sreq->data_len = total;
				if (i > 0)
					for (k = 0; k < num_entries; k++) {
						sreq->data[k].len =
							sreq->data[i+k].len;
						sreq->data[k].vaddr =
							sreq->data[i+k].vaddr;
					}
				sreq->entries = num_entries;

				i = j;
				err = qcedev_sha_update_max_xfer(qcedev_areq,
									podev);
				num_entries = 0;

				sreq->data[i].vaddr = req.data[i].vaddr +
							sreq->data[i].len;
				sreq->data[i].len = req.data[i].len -
							sreq->data[i].len;
				req.data[i].vaddr = sreq->data[i].vaddr;
				req.data[i].len = sreq->data[i].len;

				if (sreq->data[i].len == 0)
					i++;
			}
		} /* end of while ((i < req.entries) && (err == 0)) */

		/* Restore the original req structure */
		for (i = 0; i < saved_req->entries; i++) {
			sreq->data[i].len = saved_req->data[i].len;
			sreq->data[i].vaddr = saved_req->data[i].vaddr;
		}
		sreq->entries = saved_req->entries;
		sreq->data_len = saved_req->data_len;
		kfree(saved_req);
	} else
		err = qcedev_sha_update_max_xfer(qcedev_areq, podev);

	return err;
}

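/*
 * Hash updates are bounced through a kernel buffer and fed to the engine
 * in chunks of at most QCE_MAX_OPER_DATA. Only whole SHA blocks are
 * hashed; the unaligned remainder is carried in ctxt.trailing_buf and
 * prepended to the next update (or consumed by the final).
 */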
static int qcedev_sha_final(struct qcedev_async_req *qcedev_areq,
	struct qcedev_control *podev)
{
	int err = 0;
	struct scatterlist sg_src;
	uint32_t total;

	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;

	qcedev_areq->sha_op_req.ctxt.first_blk = 0;
	qcedev_areq->sha_op_req.ctxt.last_blk = 1;

	total = qcedev_areq->sha_op_req.ctxt.trailing_buf_len;

	if (total) {
		k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2, GFP_KERNEL);
		if (k_buf_src == NULL)
			return -ENOMEM;

		k_align_src = (uint8_t *) ALIGN(((unsigned int)k_buf_src),
							CACHE_LINE_SIZE);
		memcpy(k_align_src,
			&qcedev_areq->sha_op_req.ctxt.trailing_buf[0],
			total);
	}
	qcedev_areq->sha_op_req.ctxt.last_blk = 1;
	qcedev_areq->sha_op_req.ctxt.first_blk = 0;

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src, total);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = total;

	err = submit_req(qcedev_areq, podev);

	qcedev_areq->sha_op_req.ctxt.first_blk = 0;
	qcedev_areq->sha_op_req.ctxt.last_blk = 0;
	qcedev_areq->sha_op_req.ctxt.auth_data[0] = 0;
	qcedev_areq->sha_op_req.ctxt.auth_data[1] = 0;
	qcedev_areq->sha_op_req.ctxt.auth_data[2] = 0;
	qcedev_areq->sha_op_req.ctxt.auth_data[3] = 0;
	qcedev_areq->sha_op_req.ctxt.trailing_buf_len = 0;
	memset(&qcedev_areq->sha_op_req.ctxt.trailing_buf[0], 0, 64);

	kfree(k_buf_src);
	return err;
}

static int qcedev_hash_cmac(struct qcedev_async_req *qcedev_areq,
	struct qcedev_control *podev)
{
	int err = 0;
	int i = 0;
	struct scatterlist sg_src[2];
	uint32_t total;

	uint8_t *user_src = NULL;
	uint8_t *k_src = NULL;
	uint8_t *k_buf_src = NULL;

	total = qcedev_areq->sha_op_req.data_len;

	/* verify address src(s) */
	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++)
		if (!access_ok(VERIFY_READ,
			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr,
			qcedev_areq->sha_op_req.data[i].len))
			return -EFAULT;

	/* Verify Source Address */
	if (!access_ok(VERIFY_READ,
			(void __user *)qcedev_areq->sha_op_req.authkey,
			qcedev_areq->sha_op_req.authklen))
		return -EFAULT;
	if (__copy_from_user(&qcedev_areq->sha_op_req.ctxt.authkey[0],
			(void __user *)qcedev_areq->sha_op_req.authkey,
			qcedev_areq->sha_op_req.authklen))
		return -EFAULT;


	k_buf_src = kmalloc(total, GFP_KERNEL);
	if (k_buf_src == NULL)
		return -ENOMEM;

	k_src = k_buf_src;

	/* Copy data from user src(s) */
	user_src = (void __user *)qcedev_areq->sha_op_req.data[0].vaddr;
	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++) {
		user_src =
			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
		if (user_src && __copy_from_user(k_src,
				(void __user *)user_src,
				qcedev_areq->sha_op_req.data[i].len)) {
			kfree(k_buf_src);
			return -EFAULT;
		}
		k_src += qcedev_areq->sha_op_req.data[i].len;
	}

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src[0];
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_buf_src, total);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = total;
	qcedev_areq->sha_op_req.ctxt.diglen = qcedev_areq->sha_op_req.diglen;
	err = submit_req(qcedev_areq, podev);

	kfree(k_buf_src);
	return err;
}

static int qcedev_set_hmac_auth_key(struct qcedev_async_req *areq,
	struct qcedev_control *podev)
{
	int err = 0;

	if (areq->sha_op_req.authklen <= QCEDEV_MAX_KEY_SIZE) {
		/* Verify Source Address */
		if (!access_ok(VERIFY_READ,
				(void __user *)areq->sha_op_req.authkey,
				areq->sha_op_req.authklen))
			return -EFAULT;
		if (__copy_from_user(&areq->sha_op_req.ctxt.authkey[0],
				(void __user *)areq->sha_op_req.authkey,
				areq->sha_op_req.authklen))
			return -EFAULT;
	} else {
		struct qcedev_async_req authkey_areq;

		init_completion(&authkey_areq.complete);

		authkey_areq.sha_op_req.entries = 1;
		authkey_areq.sha_op_req.data[0].vaddr =
						areq->sha_op_req.authkey;
		authkey_areq.sha_op_req.data[0].len = areq->sha_op_req.authklen;
		authkey_areq.sha_op_req.data_len = areq->sha_op_req.authklen;
		authkey_areq.sha_op_req.diglen = 0;
		memset(&authkey_areq.sha_op_req.digest[0], 0,
						QCEDEV_MAX_SHA_DIGEST);
		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA1;
		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)
			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA256;

		authkey_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;

		qcedev_sha_init(&authkey_areq, podev);
		err = qcedev_sha_update(&authkey_areq, podev);
		if (!err)
			err = qcedev_sha_final(&authkey_areq, podev);
		else
			return err;
		memcpy(&areq->sha_op_req.ctxt.authkey[0],
			&authkey_areq.sha_op_req.ctxt.digest[0],
			authkey_areq.sha_op_req.ctxt.diglen);
	}
	return err;
}

static int qcedev_hmac_get_ohash(struct qcedev_async_req *qcedev_areq,
	struct qcedev_control *podev)
{
	int err = 0;
	struct scatterlist sg_src;
	uint8_t *k_src = NULL;
	uint32_t sha_block_size = 0;
	uint32_t sha_digest_size = 0;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
		sha_digest_size = SHA1_DIGEST_SIZE;
		sha_block_size = SHA1_BLOCK_SIZE;
	} else {
		if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
			sha_digest_size = SHA256_DIGEST_SIZE;
			sha_block_size = SHA256_BLOCK_SIZE;
		}
	}
	k_src = kmalloc(sha_block_size, GFP_KERNEL);
	if (k_src == NULL)
		return -ENOMEM;

	/* check for trailing buffer from previous updates and append it */
	memcpy(k_src, &qcedev_areq->sha_op_req.ctxt.trailing_buf[0],
			qcedev_areq->sha_op_req.ctxt.trailing_buf_len);

	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_src, sha_block_size);
	sg_mark_end(qcedev_areq->sha_req.sreq.src);

	qcedev_areq->sha_req.sreq.nbytes = sha_block_size;
	memset(&qcedev_areq->sha_op_req.ctxt.trailing_buf[0], 0,
							sha_block_size);
	memcpy(&qcedev_areq->sha_op_req.ctxt.trailing_buf[0],
		&qcedev_areq->sha_op_req.ctxt.digest[0],
		sha_digest_size);
	qcedev_areq->sha_op_req.ctxt.trailing_buf_len = sha_digest_size;

	qcedev_areq->sha_op_req.ctxt.first_blk = 1;
	qcedev_areq->sha_op_req.ctxt.last_blk = 0;
	qcedev_areq->sha_op_req.ctxt.auth_data[0] = 0;
	qcedev_areq->sha_op_req.ctxt.auth_data[1] = 0;

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
		memcpy(&qcedev_areq->sha_op_req.ctxt.digest[0],
			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
		qcedev_areq->sha_op_req.ctxt.diglen = SHA1_DIGEST_SIZE;
	}

	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
		memcpy(&qcedev_areq->sha_op_req.ctxt.digest[0],
			&_std_init_vector_sha256_uint8[0], SHA256_DIGEST_SIZE);
		qcedev_areq->sha_op_req.ctxt.diglen = SHA256_DIGEST_SIZE;
	}
	err = submit_req(qcedev_areq, podev);

	qcedev_areq->sha_op_req.ctxt.last_blk = 0;
	qcedev_areq->sha_op_req.ctxt.first_blk = 0;

	kfree(k_src);
	return err;
}

static int qcedev_hmac_update_iokey(struct qcedev_async_req *areq,
	struct qcedev_control *podev, bool ikey)
{
	int i;
	uint32_t constant;
	uint32_t sha_block_size;

	if (ikey)
		constant = 0x36;
	else
		constant = 0x5c;

	if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
		sha_block_size = SHA1_BLOCK_SIZE;
	else
		sha_block_size = SHA256_BLOCK_SIZE;

	memset(&areq->sha_op_req.ctxt.trailing_buf[0], 0, sha_block_size);
	for (i = 0; i < sha_block_size; i++)
		areq->sha_op_req.ctxt.trailing_buf[i] =
				(areq->sha_op_req.ctxt.authkey[i] ^ constant);

	areq->sha_op_req.ctxt.trailing_buf_len = sha_block_size;
	return 0;
}

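/*
 * Software HMAC fallback per RFC 2104: 0x36 is the inner-pad (ipad)
 * byte and 0x5c the outer-pad (opad) byte. When the engine cannot do
 * HMAC natively, qcedev_hmac_init/final compute
 * H((K ^ opad) || H((K ^ ipad) || msg)) out of two plain hash passes.
 */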
static int qcedev_hmac_init(struct qcedev_async_req *areq,
	struct qcedev_control *podev)
{
	int err;

	qcedev_sha_init(areq, podev);
	err = qcedev_set_hmac_auth_key(areq, podev);
	if (err)
		return err;
	if (!podev->ce_support.sha_hmac)
		qcedev_hmac_update_iokey(areq, podev, true);
	return 0;
}

static int qcedev_hmac_final(struct qcedev_async_req *areq,
	struct qcedev_control *podev)
{
	int err;

	err = qcedev_sha_final(areq, podev);
	if (podev->ce_support.sha_hmac)
		return err;

	qcedev_hmac_update_iokey(areq, podev, false);
	err = qcedev_hmac_get_ohash(areq, podev);
	if (err)
		return err;
	err = qcedev_sha_final(areq, podev);

	return err;
}

static int qcedev_hash_init(struct qcedev_async_req *areq,
	struct qcedev_control *podev)
{
	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
		(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
		return qcedev_sha_init(areq, podev);
	else
		return qcedev_hmac_init(areq, podev);
}

static int qcedev_hash_update(struct qcedev_async_req *qcedev_areq,
	struct qcedev_control *podev)
{
	return qcedev_sha_update(qcedev_areq, podev);
}

static int qcedev_hash_final(struct qcedev_async_req *areq,
	struct qcedev_control *podev)
{
	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
		(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
		return qcedev_sha_final(areq, podev);
	else
		return qcedev_hmac_final(areq, podev);
}

static int qcedev_pmem_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
	struct qcedev_control *podev)
{
	int i = 0;
	int err = 0;
	struct scatterlist *sg_src = NULL;
	struct scatterlist *sg_dst = NULL;
	struct scatterlist *sg_ndex = NULL;
	struct file *file_src = NULL;
	struct file *file_dst = NULL;
	unsigned long paddr;
	unsigned long kvaddr;
	unsigned long len;

	sg_src = kmalloc((sizeof(struct scatterlist) *
				areq->cipher_op_req.entries), GFP_KERNEL);
	if (sg_src == NULL) {
		printk(KERN_ERR "%s: Can't Allocate memory: sg_src 0x%x\n",
			__func__, (uint32_t)sg_src);
		return -ENOMEM;
	}
	memset(sg_src, 0, (sizeof(struct scatterlist) *
				areq->cipher_op_req.entries));
	sg_ndex = sg_src;
	areq->cipher_req.creq.src = sg_src;

	/* address src */
	get_pmem_file(areq->cipher_op_req.pmem.fd_src, &paddr,
					&kvaddr, &len, &file_src);

	for (i = 0; i < areq->cipher_op_req.entries; i++) {
		sg_set_buf(sg_ndex,
		((uint8_t *)(areq->cipher_op_req.pmem.src[i].offset) + kvaddr),
		areq->cipher_op_req.pmem.src[i].len);
		sg_ndex++;
	}
	sg_mark_end(--sg_ndex);

	for (i = 0; i < areq->cipher_op_req.entries; i++)
		areq->cipher_op_req.pmem.src[i].offset += (uint32_t)paddr;

	/* address dst */
	/* If not in-place encryption/decryption */
	if (areq->cipher_op_req.in_place_op != 1) {
		sg_dst = kmalloc((sizeof(struct scatterlist) *
				areq->cipher_op_req.entries), GFP_KERNEL);
		if (sg_dst == NULL)
			return -ENOMEM;
		memset(sg_dst, 0, (sizeof(struct scatterlist) *
					areq->cipher_op_req.entries));
		areq->cipher_req.creq.dst = sg_dst;
		sg_ndex = sg_dst;

		get_pmem_file(areq->cipher_op_req.pmem.fd_dst, &paddr,
					&kvaddr, &len, &file_dst);
		for (i = 0; i < areq->cipher_op_req.entries; i++)
			sg_set_buf(sg_ndex++,
			((uint8_t *)(areq->cipher_op_req.pmem.dst[i].offset)
			+ kvaddr), areq->cipher_op_req.pmem.dst[i].len);
		sg_mark_end(--sg_ndex);

		for (i = 0; i < areq->cipher_op_req.entries; i++)
			areq->cipher_op_req.pmem.dst[i].offset +=
							(uint32_t)paddr;
	} else {
		areq->cipher_req.creq.dst = sg_src;
		for (i = 0; i < areq->cipher_op_req.entries; i++) {
			areq->cipher_op_req.pmem.dst[i].offset =
				areq->cipher_op_req.pmem.src[i].offset;
			areq->cipher_op_req.pmem.dst[i].len =
				areq->cipher_op_req.pmem.src[i].len;
		}
	}

	areq->cipher_req.creq.nbytes = areq->cipher_op_req.data_len;
	areq->cipher_req.creq.info = areq->cipher_op_req.iv;

	err = submit_req(areq, podev);

	kfree(sg_src);
	kfree(sg_dst);

	if (file_dst)
		put_pmem_file(file_dst);
	if (file_src)
		put_pmem_file(file_src);

	return err;
}

static int qcedev_pmem_ablk_cipher(struct qcedev_async_req *qcedev_areq,
	struct qcedev_control *podev)
{
	int err = 0;
	int i = 0;
	int j = 0;
	int k = 0;
	int num_entries = 0;
	uint32_t total = 0;
	struct qcedev_cipher_op_req *saved_req;
	struct qcedev_cipher_op_req *creq = &qcedev_areq->cipher_op_req;

	saved_req = kmalloc(sizeof(struct qcedev_cipher_op_req), GFP_KERNEL);
	if (saved_req == NULL) {
		printk(KERN_ERR "%s:Can't Allocate mem:saved_req %x\n",
			__func__, (uint32_t)saved_req);
		return -ENOMEM;
	}
	memcpy(saved_req, creq, sizeof(struct qcedev_cipher_op_req));

	if (qcedev_areq->cipher_op_req.data_len > QCE_MAX_OPER_DATA) {

		struct qcedev_cipher_op_req req;

		/* save the original req structure */
		memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));

		i = 0;
		/* Address 32 KB at a time */
		while ((i < req.entries) && (err == 0)) {
			if (creq->pmem.src[i].len > QCE_MAX_OPER_DATA) {
				creq->pmem.src[0].len = QCE_MAX_OPER_DATA;
				if (i > 0) {
					creq->pmem.src[0].offset =
						creq->pmem.src[i].offset;
				}

				creq->data_len = QCE_MAX_OPER_DATA;
				creq->entries = 1;

				err = qcedev_pmem_ablk_cipher_max_xfer(
							qcedev_areq, podev);

				creq->pmem.src[i].len = req.pmem.src[i].len -
							QCE_MAX_OPER_DATA;
				creq->pmem.src[i].offset =
						req.pmem.src[i].offset +
							QCE_MAX_OPER_DATA;
				req.pmem.src[i].offset =
						creq->pmem.src[i].offset;
				req.pmem.src[i].len = creq->pmem.src[i].len;
			} else {
				total = 0;
				for (j = i; j < req.entries; j++) {
					num_entries++;
					if ((total + creq->pmem.src[j].len)
							>= QCE_MAX_OPER_DATA) {
						creq->pmem.src[j].len =
						QCE_MAX_OPER_DATA - total;
						total = QCE_MAX_OPER_DATA;
						break;
					}
					total += creq->pmem.src[j].len;
				}

				creq->data_len = total;
				if (i > 0)
					for (k = 0; k < num_entries; k++) {
						creq->pmem.src[k].len =
							creq->pmem.src[i+k].len;
						creq->pmem.src[k].offset =
						creq->pmem.src[i+k].offset;
					}
				creq->entries = num_entries;

				i = j;
				err = qcedev_pmem_ablk_cipher_max_xfer(
							qcedev_areq, podev);
				num_entries = 0;

				creq->pmem.src[i].offset =
						req.pmem.src[i].offset +
						creq->pmem.src[i].len;
				creq->pmem.src[i].len =
						req.pmem.src[i].len -
						creq->pmem.src[i].len;
				req.pmem.src[i].offset =
						creq->pmem.src[i].offset;
				req.pmem.src[i].len =
						creq->pmem.src[i].len;

				if (creq->pmem.src[i].len == 0)
					i++;
			}

		} /* end of while ((i < req.entries) && (err == 0)) */

	} else
		err = qcedev_pmem_ablk_cipher_max_xfer(qcedev_areq, podev);

	/* Restore the original req structure */
	for (i = 0; i < saved_req->entries; i++) {
		creq->pmem.src[i].len = saved_req->pmem.src[i].len;
		creq->pmem.src[i].offset = saved_req->pmem.src[i].offset;
	}
	creq->entries = saved_req->entries;
	creq->data_len = saved_req->data_len;
	kfree(saved_req);

	return err;
}

static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
				int *di, struct qcedev_control *podev,
				uint8_t *k_align_src)
{
	int err = 0;
	int i = 0;
	int dst_i = *di;
	struct scatterlist sg_src;
	uint32_t byteoffset = 0;
	uint8_t *user_src = NULL;
	uint8_t *k_align_dst = k_align_src;
	struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;


	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
		byteoffset = areq->cipher_op_req.byteoffset;

	user_src = (void __user *)areq->cipher_op_req.vbuf.src[0].vaddr;
	if (user_src && __copy_from_user((k_align_src + byteoffset),
				(void __user *)user_src,
				areq->cipher_op_req.vbuf.src[0].len))
		return -EFAULT;

	k_align_src += areq->cipher_op_req.vbuf.src[0].len;

	for (i = 1; i < areq->cipher_op_req.entries; i++) {
		user_src =
			(void __user *)areq->cipher_op_req.vbuf.src[i].vaddr;
		if (user_src && __copy_from_user(k_align_src,
					(void __user *)user_src,
					areq->cipher_op_req.vbuf.src[i].len)) {
			return -EFAULT;
		}
		k_align_src += areq->cipher_op_req.vbuf.src[i].len;
	}

	/* restore src beginning */
	k_align_src = k_align_dst;
	areq->cipher_op_req.data_len += byteoffset;

	areq->cipher_req.creq.src = (struct scatterlist *) &sg_src;
	areq->cipher_req.creq.dst = (struct scatterlist *) &sg_src;

	/* In place encryption/decryption */
	sg_set_buf(areq->cipher_req.creq.src,
			k_align_dst,
			areq->cipher_op_req.data_len);
	sg_mark_end(areq->cipher_req.creq.src);

	areq->cipher_req.creq.nbytes = areq->cipher_op_req.data_len;
	areq->cipher_req.creq.info = areq->cipher_op_req.iv;
	areq->cipher_op_req.entries = 1;

	err = submit_req(areq, podev);

	/* copy data to destination buffer */
	creq->data_len -= byteoffset;

	while (creq->data_len > 0) {
		if (creq->vbuf.dst[dst_i].len <= creq->data_len) {
			if (err == 0 && __copy_to_user(
				(void __user *)creq->vbuf.dst[dst_i].vaddr,
				(k_align_dst + byteoffset),
				creq->vbuf.dst[dst_i].len))
				return -EFAULT;

			k_align_dst += creq->vbuf.dst[dst_i].len +
						byteoffset;
			creq->data_len -= creq->vbuf.dst[dst_i].len;
			dst_i++;
		} else {
			if (err == 0 && __copy_to_user(
				(void __user *)creq->vbuf.dst[dst_i].vaddr,
				(k_align_dst + byteoffset),
				creq->data_len))
				return -EFAULT;

			k_align_dst += creq->data_len;
			creq->vbuf.dst[dst_i].len -= creq->data_len;
			creq->vbuf.dst[dst_i].vaddr += creq->data_len;
			creq->data_len = 0;
		}
	}
	*di = dst_i;

	return err;
}

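/*
 * Virtual-buffer path: the scattered user source is gathered into one
 * cache-line-aligned kernel bounce buffer, ciphered in place, then
 * scattered back to the user destination list. The CTR-mode byteoffset
 * shifts the payload inside the first block of the bounce buffer.
 */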
static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq,
	struct qcedev_control *podev)
{
	int err = 0;
	int di = 0;
	int i = 0;
	int j = 0;
	int k = 0;
	uint32_t byteoffset = 0;
	int num_entries = 0;
	uint32_t total = 0;
	uint32_t len;
	uint8_t *k_buf_src = NULL;
	uint8_t *k_align_src = NULL;
	uint32_t max_data_xfer;
	struct qcedev_cipher_op_req *saved_req;
	struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;

	/* Verify source addresses */
	for (i = 0; i < areq->cipher_op_req.entries; i++)
		if (!access_ok(VERIFY_READ,
			(void __user *)areq->cipher_op_req.vbuf.src[i].vaddr,
			areq->cipher_op_req.vbuf.src[i].len))
			return -EFAULT;

	/* Verify destination addresses */
	if (areq->cipher_op_req.in_place_op != 1)
		for (i = 0; i < areq->cipher_op_req.entries; i++)
			if (!access_ok(VERIFY_WRITE,
			(void __user *)areq->cipher_op_req.vbuf.dst[i].vaddr,
			areq->cipher_op_req.vbuf.dst[i].len))
				return -EFAULT;

	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
		byteoffset = areq->cipher_op_req.byteoffset;
	k_buf_src = kmalloc(QCE_MAX_OPER_DATA + CACHE_LINE_SIZE * 2,
				GFP_KERNEL);
	if (k_buf_src == NULL) {
		printk(KERN_ERR "%s: Can't Allocate memory: k_buf_src 0x%x\n",
			__func__, (uint32_t)k_buf_src);
		return -ENOMEM;
	}
	k_align_src = (uint8_t *) ALIGN(((unsigned int)k_buf_src),
						CACHE_LINE_SIZE);
	max_data_xfer = QCE_MAX_OPER_DATA - byteoffset;

	saved_req = kmalloc(sizeof(struct qcedev_cipher_op_req), GFP_KERNEL);
	if (saved_req == NULL) {
		printk(KERN_ERR "%s: Can't Allocate memory: saved_req 0x%x\n",
			__func__, (uint32_t)saved_req);
		kfree(k_buf_src);
		return -ENOMEM;
	}
	memcpy(saved_req, creq, sizeof(struct qcedev_cipher_op_req));

	if (areq->cipher_op_req.data_len > max_data_xfer) {
		struct qcedev_cipher_op_req req;

		/* save the original req structure */
		memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));

		i = 0;
		/* Address 32 KB at a time */
		while ((i < req.entries) && (err == 0)) {
			if (creq->vbuf.src[i].len > max_data_xfer) {
				creq->vbuf.src[0].len = max_data_xfer;
				if (i > 0) {
					creq->vbuf.src[0].vaddr =
						creq->vbuf.src[i].vaddr;
				}

				creq->data_len = max_data_xfer;
				creq->entries = 1;

				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
						&di, podev, k_align_src);
				if (err < 0) {
					kfree(k_buf_src);
					kfree(saved_req);
					return err;
				}

				creq->vbuf.src[i].len = req.vbuf.src[i].len -
							max_data_xfer;
				creq->vbuf.src[i].vaddr =
						req.vbuf.src[i].vaddr +
						max_data_xfer;
				req.vbuf.src[i].vaddr =
						creq->vbuf.src[i].vaddr;
				req.vbuf.src[i].len = creq->vbuf.src[i].len;

			} else {
				total = areq->cipher_op_req.byteoffset;
				for (j = i; j < req.entries; j++) {
					num_entries++;
					if ((total + creq->vbuf.src[j].len)
							>= max_data_xfer) {
						creq->vbuf.src[j].len =
							max_data_xfer - total;
						total = max_data_xfer;
						break;
					}
					total += creq->vbuf.src[j].len;
				}

				creq->data_len = total;
				if (i > 0)
					for (k = 0; k < num_entries; k++) {
						creq->vbuf.src[k].len =
							creq->vbuf.src[i+k].len;
						creq->vbuf.src[k].vaddr =
						creq->vbuf.src[i+k].vaddr;
					}
				creq->entries = num_entries;

				i = j;
				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
						&di, podev, k_align_src);
				if (err < 0) {
					kfree(k_buf_src);
					kfree(saved_req);
					return err;
				}

				num_entries = 0;
				areq->cipher_op_req.byteoffset = 0;

				creq->vbuf.src[i].vaddr = req.vbuf.src[i].vaddr
					+ creq->vbuf.src[i].len;
				creq->vbuf.src[i].len = req.vbuf.src[i].len -
							creq->vbuf.src[i].len;

				req.vbuf.src[i].vaddr =
						creq->vbuf.src[i].vaddr;
				req.vbuf.src[i].len = creq->vbuf.src[i].len;

				if (creq->vbuf.src[i].len == 0)
					i++;
			}

			areq->cipher_op_req.byteoffset = 0;
			max_data_xfer = QCE_MAX_OPER_DATA;
			byteoffset = 0;

		} /* end of while ((i < req.entries) && (err == 0)) */
	} else
		err = qcedev_vbuf_ablk_cipher_max_xfer(areq, &di, podev,
							k_align_src);

	/* Restore the original req structure */
	for (i = 0; i < saved_req->entries; i++) {
		creq->vbuf.src[i].len = saved_req->vbuf.src[i].len;
		creq->vbuf.src[i].vaddr = saved_req->vbuf.src[i].vaddr;
	}
	for (len = 0, i = 0; len < saved_req->data_len; i++) {
		creq->vbuf.dst[i].len = saved_req->vbuf.dst[i].len;
		creq->vbuf.dst[i].vaddr = saved_req->vbuf.dst[i].vaddr;
		len += saved_req->vbuf.dst[i].len;
	}
	creq->entries = saved_req->entries;
	creq->data_len = saved_req->data_len;
	creq->byteoffset = saved_req->byteoffset;

	kfree(saved_req);
	kfree(k_buf_src);
	return err;
}

static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
	struct qcedev_control *podev)
{
	if ((req->entries == 0) || (req->data_len == 0))
		goto error;
	if ((req->alg >= QCEDEV_ALG_LAST) ||
		(req->mode >= QCEDEV_AES_DES_MODE_LAST))
		goto error;
	if (req->alg == QCEDEV_ALG_AES) {
		if ((req->mode == QCEDEV_AES_MODE_XTS) &&
			(!podev->ce_support.aes_xts))
			goto error;
		/* if intending to use HW key make sure key fields are set
		 * correctly and HW key is indeed supported in target
		 */
		if (req->encklen == 0) {
			int i;
			for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++)
				if (req->enckey[i])
					goto error;
			if ((req->op != QCEDEV_OPER_ENC_NO_KEY) &&
				(req->op != QCEDEV_OPER_DEC_NO_KEY))
				if (!podev->platform_support.hw_key_support)
					goto error;
		} else {
			if (req->encklen == QCEDEV_AES_KEY_192) {
				if (!podev->ce_support.aes_key_192)
					goto error;
			} else {
				/* if not using HW key make sure key
				 * length is valid
				 */
				if (!((req->encklen == QCEDEV_AES_KEY_128) ||
					(req->encklen == QCEDEV_AES_KEY_256)))
					goto error;
			}
		}
	}
	/* if using a byteoffset, make sure it is CTR mode using vbuf */
	if (req->byteoffset) {
		if (req->mode != QCEDEV_AES_MODE_CTR)
			goto error;
		else { /* if using CTR mode make sure not using pmem */
			if (req->use_pmem)
				goto error;
		}
	}
	/* if using pmem, ensure it is an in_place_op */
	if (req->use_pmem) {
		if (!req->in_place_op)
			goto error;
	}
	/* Ensure zero ivlen for ECB mode, and non-zero ivlen otherwise */
	if (req->ivlen != 0) {
		if ((req->mode == QCEDEV_AES_MODE_ECB) ||
			(req->mode == QCEDEV_DES_MODE_ECB))
			goto error;
	} else {
		if ((req->mode != QCEDEV_AES_MODE_ECB) &&
			(req->mode != QCEDEV_DES_MODE_ECB))
			goto error;
	}

	return 0;
error:
	return -EINVAL;
}

static int qcedev_check_sha_params(struct qcedev_sha_op_req *req,
	struct qcedev_control *podev)
{
	if ((req->alg == QCEDEV_ALG_AES_CMAC) &&
		(!podev->ce_support.cmac))
		goto sha_error;

	if ((req->entries == 0) || (req->data_len == 0))
		goto sha_error;

	if (req->alg >= QCEDEV_ALG_SHA_ALG_LAST)
		goto sha_error;

	return 0;
sha_error:
	return -EINVAL;
}

static long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	int err = 0;
	struct qcedev_control *podev;
	struct qcedev_async_req qcedev_areq;
	struct qcedev_stat *pstat;

	podev = file->private_data;
	if (podev == NULL || podev->magic != QCEDEV_MAGIC) {
		printk(KERN_ERR "%s: invalid handle %p\n",
			__func__, podev);
		return -ENOENT;
	}

	/* Verify user arguments. */
	if (_IOC_TYPE(cmd) != QCEDEV_IOC_MAGIC)
		return -ENOTTY;

	init_completion(&qcedev_areq.complete);
	pstat = &_qcedev_stat[podev->pdev->id];

	switch (cmd) {
	case QCEDEV_IOCTL_LOCK_CE:
		if (podev->platform_support.ce_shared)
			err = qcedev_lock_ce(podev);
		else
			err = -ENOTTY;
		break;
	case QCEDEV_IOCTL_UNLOCK_CE:
		if (podev->platform_support.ce_shared)
			err = qcedev_unlock_ce(podev);
		else
			err = -ENOTTY;
		break;
	case QCEDEV_IOCTL_ENC_REQ:
	case QCEDEV_IOCTL_DEC_REQ:
		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
				sizeof(struct qcedev_cipher_op_req)))
			return -EFAULT;

		if (__copy_from_user(&qcedev_areq.cipher_op_req,
				(void __user *)arg,
				sizeof(struct qcedev_cipher_op_req)))
			return -EFAULT;
		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_CIPHER;

		if (qcedev_check_cipher_params(&qcedev_areq.cipher_op_req,
				podev))
			return -EINVAL;

		if (qcedev_areq.cipher_op_req.use_pmem == QCEDEV_USE_PMEM)
			err = qcedev_pmem_ablk_cipher(&qcedev_areq, podev);
		else
			err = qcedev_vbuf_ablk_cipher(&qcedev_areq, podev);
		if (err)
			return err;
		if (__copy_to_user((void __user *)arg,
				&qcedev_areq.cipher_op_req,
				sizeof(struct qcedev_cipher_op_req)))
			return -EFAULT;
		break;

	case QCEDEV_IOCTL_SHA_INIT_REQ:

		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
				sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;

		if (__copy_from_user(&qcedev_areq.sha_op_req,
				(void __user *)arg,
				sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;
		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
			return -EINVAL;
		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
		err = qcedev_hash_init(&qcedev_areq, podev);
		if (err)
			return err;
		if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
				sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;
		break;
	case QCEDEV_IOCTL_GET_CMAC_REQ:
		if (!podev->ce_support.cmac)
			return -ENOTTY;
		/* fall through: CMAC shares the update path below */
	case QCEDEV_IOCTL_SHA_UPDATE_REQ:
		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
				sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;

		if (__copy_from_user(&qcedev_areq.sha_op_req,
				(void __user *)arg,
				sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;
		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
			return -EINVAL;
		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;

		if (qcedev_areq.sha_op_req.alg == QCEDEV_ALG_AES_CMAC) {
			err = qcedev_hash_cmac(&qcedev_areq, podev);
			if (err)
				return err;
		} else {
			err = qcedev_hash_update(&qcedev_areq, podev);
			if (err)
				return err;
		}

		memcpy(&qcedev_areq.sha_op_req.digest[0],
				&qcedev_areq.sha_op_req.ctxt.digest[0],
				qcedev_areq.sha_op_req.ctxt.diglen);
		if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
				sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;
		break;

	case QCEDEV_IOCTL_SHA_FINAL_REQ:

		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
				sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;

		if (__copy_from_user(&qcedev_areq.sha_op_req,
				(void __user *)arg,
				sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;
		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
			return -EINVAL;
		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
		err = qcedev_hash_final(&qcedev_areq, podev);
		if (err)
			return err;
		qcedev_areq.sha_op_req.diglen =
				qcedev_areq.sha_op_req.ctxt.diglen;
		memcpy(&qcedev_areq.sha_op_req.digest[0],
				&qcedev_areq.sha_op_req.ctxt.digest[0],
				qcedev_areq.sha_op_req.ctxt.diglen);
		if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
				sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;
		break;

	case QCEDEV_IOCTL_GET_SHA_REQ:

		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
				sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;

		if (__copy_from_user(&qcedev_areq.sha_op_req,
				(void __user *)arg,
				sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;
		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
			return -EINVAL;
		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
		qcedev_hash_init(&qcedev_areq, podev);
		err = qcedev_hash_update(&qcedev_areq, podev);
		if (err)
			return err;
		err = qcedev_hash_final(&qcedev_areq, podev);
		if (err)
			return err;
		qcedev_areq.sha_op_req.diglen =
				qcedev_areq.sha_op_req.ctxt.diglen;
		memcpy(&qcedev_areq.sha_op_req.digest[0],
				&qcedev_areq.sha_op_req.ctxt.digest[0],
				qcedev_areq.sha_op_req.ctxt.diglen);
		if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
				sizeof(struct qcedev_sha_op_req)))
			return -EFAULT;
		break;

	default:
		return -ENOTTY;
	}

	return err;
}

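/*
 * Illustrative (hypothetical) userspace sketch of this ioctl interface,
 * assuming the uapi definitions from inc/qcedev.h; not part of the
 * driver itself. One-shot SHA-256 over a single buffer:
 *
 *	int fd = open("/dev/qce", O_RDWR);	// misc device named "qce"
 *	struct qcedev_sha_op_req req = {0};
 *	req.alg = QCEDEV_ALG_SHA256;
 *	req.entries = 1;
 *	req.data[0].vaddr = buf;		// user buffer to hash
 *	req.data[0].len = buf_len;
 *	req.data_len = buf_len;
 *	if (ioctl(fd, QCEDEV_IOCTL_GET_SHA_REQ, &req) == 0)
 *		;	// req.digest[0..req.diglen-1] now holds the hash
 */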
static int qcedev_probe(struct platform_device *pdev)
{
	void *handle = NULL;
	int rc = 0;
	struct qcedev_control *podev;
	struct msm_ce_hw_support *platform_support;

	if (pdev->id >= MAX_QCE_DEVICE) {
		printk(KERN_ERR "%s: device id %d exceeds allowed %d\n",
			__func__, pdev->id, MAX_QCE_DEVICE);
		return -ENOENT;
	}
	podev = &qce_dev[pdev->id];

	platform_support = (struct msm_ce_hw_support *)pdev->dev.platform_data;
	podev->platform_support.ce_shared = platform_support->ce_shared;
	podev->platform_support.shared_ce_resource =
				platform_support->shared_ce_resource;
	podev->platform_support.hw_key_support =
				platform_support->hw_key_support;
	podev->ce_lock_count = 0;

	INIT_LIST_HEAD(&podev->ready_commands);
	podev->active_command = NULL;

	spin_lock_init(&podev->lock);

	tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev);

	/* open qce */
	handle = qce_open(pdev, &rc);
	if (handle == NULL) {
		platform_set_drvdata(pdev, NULL);
		return rc;
	}

	podev->qce = handle;
	podev->pdev = pdev;
	platform_set_drvdata(pdev, podev);
	qce_hw_support(podev->qce, &podev->ce_support);
	rc = misc_register(&podev->miscdevice);

	if (rc >= 0)
		return 0;

	if (handle)
		qce_close(handle);
	platform_set_drvdata(pdev, NULL);
	podev->qce = NULL;
	podev->pdev = NULL;
	return rc;
}

static int qcedev_remove(struct platform_device *pdev)
{
	struct qcedev_control *podev;

	podev = platform_get_drvdata(pdev);
	if (!podev)
		return 0;
	if (podev->qce)
		qce_close(podev->qce);

	if (podev->miscdevice.minor != MISC_DYNAMIC_MINOR)
		misc_deregister(&podev->miscdevice);
	tasklet_kill(&podev->done_tasklet);
	return 0;
}

static struct platform_driver qcedev_plat_driver = {
	.probe = qcedev_probe,
	.remove = qcedev_remove,
	.driver = {
		.name = "qce",
		.owner = THIS_MODULE,
	},
};

static int _disp_stats(int id)
{
	struct qcedev_stat *pstat;
	int len = 0;

	pstat = &_qcedev_stat[id];
	len = snprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
			"\nQualcomm QCE dev driver %d Statistics:\n",
				id + 1);

	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   Encryption operation success : %d\n",
					pstat->qcedev_enc_success);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   Encryption operation fail    : %d\n",
					pstat->qcedev_enc_fail);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   Decryption operation success : %d\n",
					pstat->qcedev_dec_success);

	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   Decryption operation fail    : %d\n",
					pstat->qcedev_dec_fail);

	return len;
}

static int _debug_stats_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static ssize_t _debug_stats_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	int rc = -EINVAL;
	int qcedev = *((int *) file->private_data);
	int len;

	len = _disp_stats(qcedev);

	rc = simple_read_from_buffer((void __user *) buf, len,
			ppos, (void *) _debug_read_buf, len);

	return rc;
}

static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
			size_t count, loff_t *ppos)
{

	int qcedev = *((int *) file->private_data);

	memset((char *)&_qcedev_stat[qcedev], 0, sizeof(struct qcedev_stat));
	return count;
}

static const struct file_operations _debug_stats_ops = {
	.open = _debug_stats_open,
	.read = _debug_stats_read,
	.write = _debug_stats_write,
};

static int _qcedev_debug_init(void)
{
	int rc;
	char name[DEBUG_MAX_FNAME];
	int i;
	struct dentry *dent;

	_debug_dent = debugfs_create_dir("qcedev", NULL);
	if (IS_ERR(_debug_dent)) {
		pr_err("qcedev debugfs_create_dir fail, error %ld\n",
				PTR_ERR(_debug_dent));
		return PTR_ERR(_debug_dent);
	}

	for (i = 0; i < MAX_QCE_DEVICE; i++) {
		snprintf(name, DEBUG_MAX_FNAME - 1, "stats-%d", i + 1);
		_debug_qcedev[i] = i;
		dent = debugfs_create_file(name, 0644, _debug_dent,
				&_debug_qcedev[i], &_debug_stats_ops);
		if (dent == NULL) {
			pr_err("qcedev debugfs_create_file fail, error %ld\n",
					PTR_ERR(dent));
			rc = PTR_ERR(dent);
			goto err;
		}
	}
	return 0;
err:
	debugfs_remove_recursive(_debug_dent);
	return rc;
}

static int qcedev_init(void)
{
	int rc;

	rc = _qcedev_debug_init();
	if (rc)
		return rc;
	return platform_driver_register(&qcedev_plat_driver);
}

static void qcedev_exit(void)
{
	debugfs_remove_recursive(_debug_dent);
	platform_driver_unregister(&qcedev_plat_driver);
}

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
MODULE_DESCRIPTION("Qualcomm DEV Crypto driver");
MODULE_VERSION("1.21");

module_init(qcedev_init);
module_exit(qcedev_exit);