/* Qualcomm Crypto driver
 *
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>

#include <crypto/ctr.h>
#include <crypto/des.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include <mach/scm.h>
#include <linux/platform_data/qcom_crypto_device.h>
#include "qce.h"

#define MAX_CRYPTO_DEVICE 3
#define DEBUG_MAX_FNAME  16
#define DEBUG_MAX_RW_BUF 1024

struct crypto_stat {
	u32 aead_sha1_aes_enc;
	u32 aead_sha1_aes_dec;
	u32 aead_sha1_des_enc;
	u32 aead_sha1_des_dec;
	u32 aead_sha1_3des_enc;
	u32 aead_sha1_3des_dec;
	u32 aead_op_success;
	u32 aead_op_fail;
	u32 ablk_cipher_aes_enc;
	u32 ablk_cipher_aes_dec;
	u32 ablk_cipher_des_enc;
	u32 ablk_cipher_des_dec;
	u32 ablk_cipher_3des_enc;
	u32 ablk_cipher_3des_dec;
	u32 ablk_cipher_op_success;
	u32 ablk_cipher_op_fail;
	u32 sha1_digest;
	u32 sha256_digest;
	u32 sha_op_success;
	u32 sha_op_fail;
	u32 sha1_hmac_digest;
	u32 sha256_hmac_digest;
	u32 sha_hmac_op_success;
	u32 sha_hmac_op_fail;
};
static struct crypto_stat _qcrypto_stat[MAX_CRYPTO_DEVICE];
static struct dentry *_debug_dent;
static char _debug_read_buf[DEBUG_MAX_RW_BUF];

struct crypto_priv {
	/* CE features supported by target device */
	struct msm_ce_hw_support platform_support;

	/* CE features/algorithms supported by HW engine */
	struct ce_hw_support ce_support;

	/* the lock protects queue and req */
	spinlock_t lock;

	/* qce handle */
	void *qce;

	/* list of registered algorithms */
	struct list_head alg_list;

	/* platform device */
	struct platform_device *pdev;

	/* current active request */
	struct crypto_async_request *req;
	int res;

	/* request queue */
	struct crypto_queue queue;

	uint32_t ce_lock_count;

	struct work_struct unlock_ce_ws;

	struct tasklet_struct done_tasklet;
};


/*-------------------------------------------------------------------------
 * Resource Locking Service
 * ------------------------------------------------------------------------*/
#define QCRYPTO_CMD_ID		1
#define QCRYPTO_CE_LOCK_CMD	1
#define QCRYPTO_CE_UNLOCK_CMD	0
#define NUM_RETRY		1000
#define CE_BUSY			55

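/*
 * On targets where the crypto engine is shared with the secure
 * environment (platform_support.ce_shared), ownership of the engine is
 * arbitrated through SCM calls into TZ. qcrypto_lock_ce() retries the
 * lock command while TZ reports the engine busy; qcrypto_unlock_ce()
 * releases it from a workqueue when the last local holder is done.
 * ce_lock_count tracks how many queued requests currently hold the lock.
 */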
static int qcrypto_scm_cmd(int resource, int cmd, int *response)
{
#ifdef CONFIG_MSM_SCM

	struct {
		int resource;
		int cmd;
	} cmd_buf;

	cmd_buf.resource = resource;
	cmd_buf.cmd = cmd;

	return scm_call(SCM_SVC_TZ, QCRYPTO_CMD_ID, &cmd_buf,
			sizeof(cmd_buf), response, sizeof(*response));

#else
	return 0;
#endif
}

static void qcrypto_unlock_ce(struct work_struct *work)
{
	int response = 0;
	unsigned long flags;
	struct crypto_priv *cp = container_of(work, struct crypto_priv,
							unlock_ce_ws);

	if (cp->ce_lock_count == 1)
		BUG_ON(qcrypto_scm_cmd(cp->platform_support.shared_ce_resource,
				QCRYPTO_CE_UNLOCK_CMD, &response) != 0);
	spin_lock_irqsave(&cp->lock, flags);
	cp->ce_lock_count--;
	spin_unlock_irqrestore(&cp->lock, flags);
}

static int qcrypto_lock_ce(struct crypto_priv *cp)
{
	unsigned long flags;
	int response = -CE_BUSY;
	int i = 0;

	if (cp->ce_lock_count == 0) {
		do {
			if (qcrypto_scm_cmd(
				cp->platform_support.shared_ce_resource,
				QCRYPTO_CE_LOCK_CMD, &response)) {
				response = -EINVAL;
				break;
			}
		} while ((response == -CE_BUSY) && (i++ < NUM_RETRY));

		if ((response == -CE_BUSY) && (i >= NUM_RETRY))
			return -EUSERS;
		if (response < 0)
			return -EINVAL;
	}
	spin_lock_irqsave(&cp->lock, flags);
	cp->ce_lock_count++;
	spin_unlock_irqrestore(&cp->lock, flags);

	return 0;
}

enum qcrypto_alg_type {
	QCRYPTO_ALG_CIPHER	= 0,
	QCRYPTO_ALG_SHA		= 1,
	QCRYPTO_ALG_LAST
};

struct qcrypto_alg {
	struct list_head entry;
	struct crypto_alg cipher_alg;
	struct ahash_alg sha_alg;
	enum qcrypto_alg_type alg_type;
	struct crypto_priv *cp;
};

#define QCRYPTO_MAX_KEY_SIZE	64
/* max of AES_BLOCK_SIZE and DES3_EDE_BLOCK_SIZE */
#define QCRYPTO_MAX_IV_LENGTH	16

struct qcrypto_cipher_ctx {
	u8 auth_key[QCRYPTO_MAX_KEY_SIZE];
	u8 iv[QCRYPTO_MAX_IV_LENGTH];

	u8 enc_key[QCRYPTO_MAX_KEY_SIZE];
	unsigned int enc_key_len;

	unsigned int authsize;
	unsigned int auth_key_len;

	struct crypto_priv *cp;
};

struct qcrypto_cipher_req_ctx {
	u8 *iv;
	unsigned int ivsize;
	int aead;
	struct scatterlist asg;		/* formatted associated data sg */
	unsigned char *assoc;		/* pointer to formatted assoc data */
	unsigned int assoclen;		/* saved unformatted assoc data length */
	struct scatterlist *assoc_sg;	/* saved unformatted assoc data sg */
	enum qce_cipher_alg_enum alg;
	enum qce_cipher_dir_enum dir;
	enum qce_cipher_mode_enum mode;
};

#define SHA_MAX_BLOCK_SIZE	SHA256_BLOCK_SIZE
#define SHA_MAX_STATE_SIZE	(SHA256_DIGEST_SIZE / sizeof(u32))
#define SHA_MAX_DIGEST_SIZE	SHA256_DIGEST_SIZE

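/* standard initialization vector for SHA-1, source: FIPS 180-2 */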
static uint8_t _std_init_vector_sha1_uint8[] = {
	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
	0xC3, 0xD2, 0xE1, 0xF0
};

/* standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint8_t _std_init_vector_sha256_uint8[] = {
	0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
	0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
	0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
};

struct qcrypto_sha_ctx {
	enum qce_hash_alg_enum alg;
	uint32_t byte_count[4];
	uint8_t digest[SHA_MAX_DIGEST_SIZE];
	uint32_t diglen;
	uint8_t *tmp_tbuf;
	uint8_t *trailing_buf;
	uint8_t *in_buf;
	uint32_t authkey_in_len;
	uint32_t trailing_buf_len;
	uint8_t first_blk;
	uint8_t last_blk;
	uint8_t authkey[SHA_MAX_BLOCK_SIZE];
	struct ahash_request *ahash_req;
	struct completion ahash_req_complete;
	struct scatterlist *sg;
	struct scatterlist tmp_sg;
	struct crypto_priv *cp;
};

struct qcrypto_sha_req_ctx {
	union {
		struct sha1_state sha1_state_ctx;
		struct sha256_state sha256_state_ctx;
	};
	struct scatterlist *src;
	uint32_t nbytes;
};

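/*
 * Helpers to convert between a big-endian byte stream and an array of
 * 32-bit words, handling a trailing partial word. The driver keeps
 * digests and intermediate state as big-endian byte streams, while the
 * crypto/sha.h state structures hold host-order words.
 */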
static void _byte_stream_to_words(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned n;

	n = len / sizeof(uint32_t);
	for (; n > 0; n--) {
		*iv = ((*b << 24) & 0xff000000) |
			(((*(b+1)) << 16) & 0xff0000) |
			(((*(b+2)) << 8) & 0xff00) |
			(*(b+3) & 0xff);
		b += sizeof(uint32_t);
		iv++;
	}

	n = len % sizeof(uint32_t);
	if (n == 3) {
		*iv = ((*b << 24) & 0xff000000) |
			(((*(b+1)) << 16) & 0xff0000) |
			(((*(b+2)) << 8) & 0xff00);
	} else if (n == 2) {
		*iv = ((*b << 24) & 0xff000000) |
			(((*(b+1)) << 16) & 0xff0000);
	} else if (n == 1) {
		*iv = ((*b << 24) & 0xff000000);
	}
}

static void _words_to_byte_stream(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned n = len / sizeof(uint32_t);

	for (; n > 0; n--) {
		*b++ = (unsigned char) ((*iv >> 24) & 0xff);
		*b++ = (unsigned char) ((*iv >> 16) & 0xff);
		*b++ = (unsigned char) ((*iv >> 8) & 0xff);
		*b++ = (unsigned char) (*iv & 0xff);
		iv++;
	}
	n = len % sizeof(uint32_t);
	if (n == 3) {
		*b++ = (unsigned char) ((*iv >> 24) & 0xff);
		*b++ = (unsigned char) ((*iv >> 16) & 0xff);
		*b = (unsigned char) ((*iv >> 8) & 0xff);
	} else if (n == 2) {
		*b++ = (unsigned char) ((*iv >> 24) & 0xff);
		*b = (unsigned char) ((*iv >> 16) & 0xff);
	} else if (n == 1) {
		*b = (unsigned char) ((*iv >> 24) & 0xff);
	}
}

static void _start_qcrypto_process(struct crypto_priv *cp);

static struct qcrypto_alg *_qcrypto_sha_alg_alloc(struct crypto_priv *cp,
		struct ahash_alg *template)
{
	struct qcrypto_alg *q_alg;

	q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
	if (!q_alg) {
		pr_err("qcrypto: memory allocation of q_alg failed\n");
		return ERR_PTR(-ENOMEM);
	}

	q_alg->alg_type = QCRYPTO_ALG_SHA;
	q_alg->sha_alg = *template;
	q_alg->cp = cp;

	return q_alg;
}

static struct qcrypto_alg *_qcrypto_cipher_alg_alloc(struct crypto_priv *cp,
		struct crypto_alg *template)
{
	struct qcrypto_alg *q_alg;

	q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
	if (!q_alg) {
		pr_err("qcrypto: memory allocation of q_alg failed\n");
		return ERR_PTR(-ENOMEM);
	}

	q_alg->alg_type = QCRYPTO_ALG_CIPHER;
	q_alg->cipher_alg = *template;
	q_alg->cp = cp;

	return q_alg;
}

static int _qcrypto_cipher_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct qcrypto_alg *q_alg;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	q_alg = container_of(alg, struct qcrypto_alg, cipher_alg);

	/* update context with ptr to cp */
	ctx->cp = q_alg->cp;

	/* random first IV */
	get_random_bytes(ctx->iv, QCRYPTO_MAX_IV_LENGTH);

	return 0;
}

static int _qcrypto_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
	struct ahash_alg *alg = container_of(crypto_hash_alg_common(ahash),
						struct ahash_alg, halg);
	struct qcrypto_alg *q_alg = container_of(alg, struct qcrypto_alg,
								sha_alg);

	crypto_ahash_set_reqsize(ahash, sizeof(struct qcrypto_sha_req_ctx));
	/* update context with ptr to cp */
	sha_ctx->cp = q_alg->cp;
	sha_ctx->sg = NULL;
	sha_ctx->tmp_tbuf = kzalloc(SHA_MAX_BLOCK_SIZE +
					SHA_MAX_DIGEST_SIZE, GFP_KERNEL);
	if (sha_ctx->tmp_tbuf == NULL) {
		pr_err("qcrypto: cannot allocate sha_ctx->tmp_tbuf\n");
		return -ENOMEM;
	}

	sha_ctx->trailing_buf = kzalloc(SHA_MAX_BLOCK_SIZE, GFP_KERNEL);
	if (sha_ctx->trailing_buf == NULL) {
		kfree(sha_ctx->tmp_tbuf);
		sha_ctx->tmp_tbuf = NULL;
		pr_err("qcrypto: cannot allocate sha_ctx->trailing_buf\n");
		return -ENOMEM;
	}

	sha_ctx->ahash_req = NULL;
	return 0;
}

static void _qcrypto_ahash_cra_exit(struct crypto_tfm *tfm)
{
	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);

	kfree(sha_ctx->tmp_tbuf);
	sha_ctx->tmp_tbuf = NULL;
	kfree(sha_ctx->trailing_buf);
	sha_ctx->trailing_buf = NULL;
	if (sha_ctx->sg != NULL) {
		kfree(sha_ctx->sg);
		sha_ctx->sg = NULL;
	}
	if (sha_ctx->ahash_req != NULL) {
		ahash_request_free(sha_ctx->ahash_req);
		sha_ctx->ahash_req = NULL;
	}
}

static void _crypto_sha_hmac_ahash_req_complete(
	struct crypto_async_request *req, int err);

static int _qcrypto_ahash_hmac_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
	int ret = 0;

	ret = _qcrypto_ahash_cra_init(tfm);
	if (ret)
		return ret;

	sha_ctx->ahash_req = ahash_request_alloc(ahash, GFP_KERNEL);
	if (sha_ctx->ahash_req == NULL) {
		_qcrypto_ahash_cra_exit(tfm);
		return -ENOMEM;
	}

	init_completion(&sha_ctx->ahash_req_complete);
	ahash_request_set_callback(sha_ctx->ahash_req,
				CRYPTO_TFM_REQ_MAY_BACKLOG,
				_crypto_sha_hmac_ahash_req_complete,
				&sha_ctx->ahash_req_complete);
	crypto_ahash_clear_flags(ahash, ~0);

	return 0;
}

static int _qcrypto_cra_ablkcipher_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct qcrypto_cipher_req_ctx);
	return _qcrypto_cipher_cra_init(tfm);
}

static int _qcrypto_cra_aead_init(struct crypto_tfm *tfm)
{
	tfm->crt_aead.reqsize = sizeof(struct qcrypto_cipher_req_ctx);
	return _qcrypto_cipher_cra_init(tfm);
}

static int _disp_stats(int id)
{
	struct crypto_stat *pstat;
	int len = 0;

	pstat = &_qcrypto_stat[id];
	len = snprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
			"\nQualcomm crypto accelerator %d Statistics:\n",
			id + 1);

	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" ABLK AES CIPHER encryption : %d\n",
			pstat->ablk_cipher_aes_enc);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" ABLK AES CIPHER decryption : %d\n",
			pstat->ablk_cipher_aes_dec);

	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" ABLK DES CIPHER encryption : %d\n",
			pstat->ablk_cipher_des_enc);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" ABLK DES CIPHER decryption : %d\n",
			pstat->ablk_cipher_des_dec);

	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" ABLK 3DES CIPHER encryption : %d\n",
			pstat->ablk_cipher_3des_enc);

	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" ABLK 3DES CIPHER decryption : %d\n",
			pstat->ablk_cipher_3des_dec);

	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" ABLK CIPHER operation success: %d\n",
			pstat->ablk_cipher_op_success);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" ABLK CIPHER operation fail : %d\n",
			pstat->ablk_cipher_op_fail);

	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" AEAD SHA1-AES encryption : %d\n",
			pstat->aead_sha1_aes_enc);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" AEAD SHA1-AES decryption : %d\n",
			pstat->aead_sha1_aes_dec);

	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" AEAD SHA1-DES encryption : %d\n",
			pstat->aead_sha1_des_enc);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" AEAD SHA1-DES decryption : %d\n",
			pstat->aead_sha1_des_dec);

	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" AEAD SHA1-3DES encryption : %d\n",
			pstat->aead_sha1_3des_enc);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" AEAD SHA1-3DES decryption : %d\n",
			pstat->aead_sha1_3des_dec);

	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" AEAD operation success : %d\n",
			pstat->aead_op_success);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" AEAD operation fail : %d\n",
			pstat->aead_op_fail);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" SHA1 digest : %d\n",
			pstat->sha1_digest);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" SHA256 digest : %d\n",
			pstat->sha256_digest);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" SHA operation fail : %d\n",
			pstat->sha_op_fail);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" SHA operation success : %d\n",
			pstat->sha_op_success);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" SHA1 HMAC digest : %d\n",
			pstat->sha1_hmac_digest);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" SHA256 HMAC digest : %d\n",
			pstat->sha256_hmac_digest);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" SHA HMAC operation fail : %d\n",
			pstat->sha_hmac_op_fail);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			" SHA HMAC operation success : %d\n",
			pstat->sha_hmac_op_success);
	return len;
}

static int _qcrypto_remove(struct platform_device *pdev)
{
	struct crypto_priv *cp;
	struct qcrypto_alg *q_alg;
	struct qcrypto_alg *n;

	cp = platform_get_drvdata(pdev);

	if (!cp)
		return 0;

	list_for_each_entry_safe(q_alg, n, &cp->alg_list, entry) {
		if (q_alg->alg_type == QCRYPTO_ALG_CIPHER)
			crypto_unregister_alg(&q_alg->cipher_alg);
		if (q_alg->alg_type == QCRYPTO_ALG_SHA)
			crypto_unregister_ahash(&q_alg->sha_alg);
		list_del(&q_alg->entry);
		kfree(q_alg);
	}

	if (cp->qce)
		qce_close(cp->qce);
	tasklet_kill(&cp->done_tasklet);
	kfree(cp);
	return 0;
}

static int _qcrypto_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
		unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_priv *cp = ctx->cp;

	switch (len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_256:
		break;
	case AES_KEYSIZE_192:
		if (cp->ce_support.aes_key_192)
			break;
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->enc_key_len = len;
	memcpy(ctx->enc_key, key, len);
	return 0;
}

static int _qcrypto_setkey_des(struct crypto_ablkcipher *cipher, const u8 *key,
		unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 tmp[DES_EXPKEY_WORDS];
	int ret = des_ekey(tmp, key);

	if (len != DES_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	ctx->enc_key_len = len;
	memcpy(ctx->enc_key, key, len);
	return 0;
}

static int _qcrypto_setkey_3des(struct crypto_ablkcipher *cipher, const u8 *key,
		unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	if (len != DES3_EDE_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->enc_key_len = len;
	memcpy(ctx->enc_key, key, len);
	return 0;
}

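/*
 * Tasklet body: runs after a qce completion callback. It detaches the
 * finished request under the lock, signals its completion with the
 * result saved in cp->res, and then tries to dispatch the next queued
 * request.
 */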
static void req_done(unsigned long data)
{
	struct crypto_async_request *areq;
	struct crypto_priv *cp = (struct crypto_priv *)data;
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	areq = cp->req;
	cp->req = NULL;
	spin_unlock_irqrestore(&cp->lock, flags);

	if (areq)
		areq->complete(areq, cp->res);
	_start_qcrypto_process(cp);
}

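/*
 * After each hash pass, mirror the driver's state back into the
 * request's sha1/sha256_state so that export/import see the trailing
 * bytes and the current intermediate digest. A final block clears the
 * state for the next use of the request.
 */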
static void _update_sha1_ctx(struct ahash_request *req)
{
	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx;
	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);

	if (sha_ctx->last_blk == 1) {
		memset(sha_state_ctx, 0x00, sizeof(struct sha1_state));
	} else {
		memset(sha_state_ctx->buffer, 0x00, SHA1_BLOCK_SIZE);
		memcpy(sha_state_ctx->buffer, sha_ctx->trailing_buf,
						sha_ctx->trailing_buf_len);
		_byte_stream_to_words(sha_state_ctx->state, sha_ctx->digest,
						SHA1_DIGEST_SIZE);
	}
}

static void _update_sha256_ctx(struct ahash_request *req)
{
	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx;
	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);

	if (sha_ctx->last_blk == 1) {
		memset(sha_state_ctx, 0x00, sizeof(struct sha256_state));
	} else {
		memset(sha_state_ctx->buf, 0x00, SHA256_BLOCK_SIZE);
		memcpy(sha_state_ctx->buf, sha_ctx->trailing_buf,
						sha_ctx->trailing_buf_len);
		_byte_stream_to_words(sha_state_ctx->state, sha_ctx->digest,
						SHA256_DIGEST_SIZE);
	}
}

static void _qce_ahash_complete(void *cookie, unsigned char *digest,
		unsigned char *authdata, int ret)
{
	struct ahash_request *areq = (struct ahash_request *) cookie;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(areq->base.tfm);
	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(areq);
	struct crypto_priv *cp = sha_ctx->cp;
	struct crypto_stat *pstat;
	uint32_t diglen = crypto_ahash_digestsize(ahash);
	uint32_t *auth32 = (uint32_t *)authdata;

	pstat = &_qcrypto_stat[cp->pdev->id];

#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qce_ahash_complete: %p ret %d\n",
				areq, ret);
#endif

	if (digest) {
		memcpy(sha_ctx->digest, digest, diglen);
		memcpy(areq->result, digest, diglen);
	}
	if (authdata) {
		sha_ctx->byte_count[0] = auth32[0];
		sha_ctx->byte_count[1] = auth32[1];
		sha_ctx->byte_count[2] = auth32[2];
		sha_ctx->byte_count[3] = auth32[3];
	}
	areq->src = rctx->src;
	areq->nbytes = rctx->nbytes;

	if (sha_ctx->sg != NULL) {
		kfree(sha_ctx->sg);
		sha_ctx->sg = NULL;
	}

	if (sha_ctx->alg == QCE_HASH_SHA1)
		_update_sha1_ctx(areq);
	if (sha_ctx->alg == QCE_HASH_SHA256)
		_update_sha256_ctx(areq);

	sha_ctx->last_blk = 0;
	sha_ctx->first_blk = 0;

	if (ret) {
		cp->res = -ENXIO;
		pstat->sha_op_fail++;
	} else {
		cp->res = 0;
		pstat->sha_op_success++;
	}

	if (cp->platform_support.ce_shared)
		schedule_work(&cp->unlock_ce_ws);
	tasklet_schedule(&cp->done_tasklet);
}

static void _qce_ablk_cipher_complete(void *cookie, unsigned char *icb,
		unsigned char *iv, int ret)
{
	struct ablkcipher_request *areq = (struct ablkcipher_request *) cookie;
	struct crypto_ablkcipher *ablk = crypto_ablkcipher_reqtfm(areq);
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qce_ablk_cipher_complete: %p ret %d\n",
				areq, ret);
#endif
	if (iv)
		memcpy(ctx->iv, iv, crypto_ablkcipher_ivsize(ablk));

	if (ret) {
		cp->res = -ENXIO;
		pstat->ablk_cipher_op_fail++;
	} else {
		cp->res = 0;
		pstat->ablk_cipher_op_success++;
	}
	if (cp->platform_support.ce_shared)
		schedule_work(&cp->unlock_ce_ws);
	tasklet_schedule(&cp->done_tasklet);
}

static void _qce_aead_complete(void *cookie, unsigned char *icv,
		unsigned char *iv, int ret)
{
	struct aead_request *areq = (struct aead_request *) cookie;
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct qcrypto_cipher_req_ctx *rctx;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(areq);

	if (rctx->mode == QCE_MODE_CCM) {
		kzfree(rctx->assoc);
		areq->assoc = rctx->assoc_sg;
		areq->assoclen = rctx->assoclen;
		if (ret) {
			if (ret == 0x2000000)
				ret = -EBADMSG;
			else
				ret = -ENXIO;
		}
	} else {
		if (ret == 0) {
			if (rctx->dir == QCE_ENCRYPT) {
				/* copy the icv to dst */
				scatterwalk_map_and_copy(icv, areq->dst,
						areq->cryptlen,
						ctx->authsize, 1);
			} else {
				unsigned char tmp[SHA256_DIGESTSIZE];

				/* compare icv from src */
				scatterwalk_map_and_copy(tmp,
					areq->src, areq->cryptlen -
					ctx->authsize, ctx->authsize, 0);
				ret = memcmp(icv, tmp, ctx->authsize);
				if (ret != 0)
					ret = -EBADMSG;
			}
		} else {
			ret = -ENXIO;
		}

		if (iv)
			memcpy(ctx->iv, iv, crypto_aead_ivsize(aead));
	}

	if (ret)
		pstat->aead_op_fail++;
	else
		pstat->aead_op_success++;

	if (cp->platform_support.ce_shared)
		schedule_work(&cp->unlock_ce_ws);
	tasklet_schedule(&cp->done_tasklet);
}

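/*
 * Encode the message length into the trailing 'csize' bytes of the CCM
 * B0 block, as specified by RFC 3610 / NIST SP 800-38C. Lengths that do
 * not fit into the chosen field width are rejected.
 */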
static int aead_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}

static int qccrypto_set_aead_ccm_nonce(struct qce_req *qreq)
{
	struct aead_request *areq = (struct aead_request *) qreq->areq;
	unsigned int i = ((unsigned int)qreq->iv[0]) + 1;

	memcpy(&qreq->nonce[0], qreq->iv, qreq->ivsize);
	/*
	 * Format control info per RFC 3610 and
	 * NIST Special Publication 800-38C
	 */
	qreq->nonce[0] |= (8 * ((qreq->authsize - 2) / 2));
	if (areq->assoclen)
		qreq->nonce[0] |= 64;

	return aead_ccm_set_msg_len(qreq->nonce + 16 - i, qreq->cryptlen, i);
}

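/*
 * CCM requires the associated data to be prefixed with its encoded
 * length (2, 6 or 10 bytes depending on magnitude) and padded to a
 * 16-byte boundary. Build that formatted copy in a DMA-able buffer; the
 * caller restores the original assoc sg when the request completes.
 */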
static int qcrypto_aead_ccm_format_adata(struct qce_req *qreq, uint32_t alen,
		struct scatterlist *sg)
{
	unsigned char *adata;
	uint32_t len, l;

	qreq->assoc = kzalloc((alen + 0x64), (GFP_KERNEL | __GFP_DMA));
	if (!qreq->assoc) {
		pr_err("qcrypto: memory allocation of adata failed\n");
		return -ENOMEM;
	}
	adata = qreq->assoc;
	/*
	 * Add control info for associated data
	 * RFC 3610 and NIST Special Publication 800-38C
	 */
	if (alen < 65280) {
		*(__be16 *)adata = cpu_to_be16(alen);
		len = 2;
	} else {
		if ((alen >= 65280) && (alen <= 0xffffffff)) {
			*(__be16 *)adata = cpu_to_be16(0xfffe);
			*(__be32 *)&adata[2] = cpu_to_be32(alen);
			len = 6;
		} else {
			*(__be16 *)adata = cpu_to_be16(0xffff);
			*(__be32 *)&adata[6] = cpu_to_be32(alen);
			len = 10;
		}
	}
	adata += len;
	qreq->assoclen = ALIGN((alen + len), 16);
	for (l = alen; l > 0; sg = sg_next(sg)) {
		memcpy(adata, sg_virt(sg), sg->length);
		l -= sg->length;
		adata += sg->length;
	}
	return 0;
}

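/*
 * Core dispatch loop. One request is active at a time: dequeue the next
 * request (notifying any backlogged submitter), then hand it to the qce
 * layer as a cipher, hash or AEAD operation. Completion is reported
 * asynchronously through the _qce_*_complete callbacks; on a submission
 * error the request is completed here and the loop tries the next one.
 */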
static void _start_qcrypto_process(struct crypto_priv *cp)
{
	struct crypto_async_request *async_req = NULL;
	struct crypto_async_request *backlog = NULL;
	unsigned long flags;
	u32 type;
	struct qce_req qreq;
	int ret;
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *cipher_ctx;
	struct qcrypto_sha_ctx *sha_ctx;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

again:
	spin_lock_irqsave(&cp->lock, flags);
	if (cp->req == NULL) {
		backlog = crypto_get_backlog(&cp->queue);
		async_req = crypto_dequeue_request(&cp->queue);
		cp->req = async_req;
	}
	spin_unlock_irqrestore(&cp->lock, flags);
	if (!async_req)
		return;
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	type = crypto_tfm_alg_type(async_req->tfm);

	if (type == CRYPTO_ALG_TYPE_ABLKCIPHER) {
		struct ablkcipher_request *req;
		struct crypto_ablkcipher *tfm;

		req = container_of(async_req, struct ablkcipher_request, base);
		cipher_ctx = crypto_tfm_ctx(async_req->tfm);
		rctx = ablkcipher_request_ctx(req);
		tfm = crypto_ablkcipher_reqtfm(req);

		qreq.op = QCE_REQ_ABLK_CIPHER;
		qreq.qce_cb = _qce_ablk_cipher_complete;
		qreq.areq = req;
		qreq.alg = rctx->alg;
		qreq.dir = rctx->dir;
		qreq.mode = rctx->mode;
		qreq.enckey = cipher_ctx->enc_key;
		qreq.encklen = cipher_ctx->enc_key_len;
		qreq.iv = req->info;
		qreq.ivsize = crypto_ablkcipher_ivsize(tfm);
		qreq.cryptlen = req->nbytes;
		qreq.use_pmem = 0;

		if ((cipher_ctx->enc_key_len == 0) &&
				(cp->platform_support.hw_key_support == 0))
			ret = -EINVAL;
		else
			ret = qce_ablk_cipher_req(cp->qce, &qreq);
	} else if (type == CRYPTO_ALG_TYPE_AHASH) {
		struct ahash_request *req;
		struct qce_sha_req sreq;

		req = container_of(async_req, struct ahash_request, base);
		sha_ctx = crypto_tfm_ctx(async_req->tfm);

		sreq.qce_cb = _qce_ahash_complete;
		sreq.digest = &sha_ctx->digest[0];
		sreq.src = req->src;
		sreq.auth_data[0] = sha_ctx->byte_count[0];
		sreq.auth_data[1] = sha_ctx->byte_count[1];
		sreq.auth_data[2] = sha_ctx->byte_count[2];
		sreq.auth_data[3] = sha_ctx->byte_count[3];
		sreq.first_blk = sha_ctx->first_blk;
		sreq.last_blk = sha_ctx->last_blk;
		sreq.size = req->nbytes;
		sreq.areq = req;

		switch (sha_ctx->alg) {
		case QCE_HASH_SHA1:
			sreq.alg = QCE_HASH_SHA1;
			sreq.authkey = NULL;
			break;
		case QCE_HASH_SHA256:
			sreq.alg = QCE_HASH_SHA256;
			sreq.authkey = NULL;
			break;
		case QCE_HASH_SHA1_HMAC:
			sreq.alg = QCE_HASH_SHA1_HMAC;
			sreq.authkey = &sha_ctx->authkey[0];
			break;
		case QCE_HASH_SHA256_HMAC:
			sreq.alg = QCE_HASH_SHA256_HMAC;
			sreq.authkey = &sha_ctx->authkey[0];
			break;
		default:
			break;
		}
		ret = qce_process_sha_req(cp->qce, &sreq);
	} else {
		struct aead_request *req = container_of(async_req,
					struct aead_request, base);
		struct crypto_aead *aead = crypto_aead_reqtfm(req);

		rctx = aead_request_ctx(req);
		cipher_ctx = crypto_tfm_ctx(async_req->tfm);

		qreq.op = QCE_REQ_AEAD;
		qreq.qce_cb = _qce_aead_complete;

		qreq.areq = req;
		qreq.alg = rctx->alg;
		qreq.dir = rctx->dir;
		qreq.mode = rctx->mode;
		qreq.iv = rctx->iv;

		qreq.enckey = cipher_ctx->enc_key;
		qreq.encklen = cipher_ctx->enc_key_len;
		qreq.authkey = cipher_ctx->auth_key;
		qreq.authklen = cipher_ctx->auth_key_len;
		qreq.authsize = crypto_aead_authsize(aead);
		qreq.ivsize = crypto_aead_ivsize(aead);
		if (qreq.mode == QCE_MODE_CCM) {
			if (qreq.dir == QCE_ENCRYPT)
				qreq.cryptlen = req->cryptlen;
			else
				qreq.cryptlen = req->cryptlen -
							qreq.authsize;
			/* Get NONCE */
			ret = qccrypto_set_aead_ccm_nonce(&qreq);
			if (ret)
				goto done;
			/* Format associated data */
			ret = qcrypto_aead_ccm_format_adata(&qreq,
							req->assoclen,
							req->assoc);
			if (ret)
				goto done;
			/*
			 * Save the original associated data
			 * length and sg
			 */
			rctx->assoc_sg = req->assoc;
			rctx->assoclen = req->assoclen;
			rctx->assoc = qreq.assoc;
			/*
			 * update req with new formatted associated
			 * data info
			 */
			req->assoc = &rctx->asg;
			req->assoclen = qreq.assoclen;
			sg_set_buf(req->assoc, qreq.assoc,
					req->assoclen);
			sg_mark_end(req->assoc);
		}
		ret = qce_aead_req(cp->qce, &qreq);
	}
done:
	if (ret) {
		spin_lock_irqsave(&cp->lock, flags);
		cp->req = NULL;
		spin_unlock_irqrestore(&cp->lock, flags);

		if (type == CRYPTO_ALG_TYPE_ABLKCIPHER)
			pstat->ablk_cipher_op_fail++;
		else if (type == CRYPTO_ALG_TYPE_AHASH)
			pstat->sha_op_fail++;
		else
			pstat->aead_op_fail++;

		async_req->complete(async_req, ret);
		goto again;
	}
}

static int _qcrypto_queue_req(struct crypto_priv *cp,
		struct crypto_async_request *req)
{
	int ret;
	unsigned long flags;

	if (cp->platform_support.ce_shared) {
		ret = qcrypto_lock_ce(cp);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&cp->lock, flags);
	ret = crypto_enqueue_request(&cp->queue, req);
	spin_unlock_irqrestore(&cp->lock, flags);
	_start_qcrypto_process(cp);

	return ret;
}

static int _qcrypto_enc_aes_ecb(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qcrypto_enc_aes_ecb: %p\n", req);
#endif
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_ECB;

	pstat->ablk_cipher_aes_enc++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_enc_aes_cbc(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qcrypto_enc_aes_cbc: %p\n", req);
#endif
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CBC;

	pstat->ablk_cipher_aes_enc++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_enc_aes_ctr(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qcrypto_enc_aes_ctr: %p\n", req);
#endif
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CTR;

	pstat->ablk_cipher_aes_enc++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_enc_aes_xts(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_XTS;

	pstat->ablk_cipher_aes_enc++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_aead_encrypt_aes_ccm(struct aead_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	if ((ctx->authsize > 16) || (ctx->authsize < 4) || (ctx->authsize & 1))
		return -EINVAL;
	if ((ctx->auth_key_len != AES_KEYSIZE_128) &&
		(ctx->auth_key_len != AES_KEYSIZE_256))
		return -EINVAL;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(req);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CCM;
	rctx->iv = req->iv;

	pstat->aead_sha1_aes_enc++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_enc_des_ecb(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_DES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_ECB;

	pstat->ablk_cipher_des_enc++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_enc_des_cbc(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_DES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CBC;

	pstat->ablk_cipher_des_enc++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_enc_3des_ecb(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_3DES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_ECB;

	pstat->ablk_cipher_3des_enc++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_enc_3des_cbc(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_3DES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CBC;

	pstat->ablk_cipher_3des_enc++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_dec_aes_ecb(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qcrypto_dec_aes_ecb: %p\n", req);
#endif
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_DECRYPT;
	rctx->mode = QCE_MODE_ECB;

	pstat->ablk_cipher_aes_dec++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_dec_aes_cbc(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qcrypto_dec_aes_cbc: %p\n", req);
#endif

	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_DECRYPT;
	rctx->mode = QCE_MODE_CBC;

	pstat->ablk_cipher_aes_dec++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_dec_aes_ctr(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qcrypto_dec_aes_ctr: %p\n", req);
#endif
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_AES;
	rctx->mode = QCE_MODE_CTR;

	/* Note: counter mode has no separate decrypt operation */
	rctx->dir = QCE_ENCRYPT;

	pstat->ablk_cipher_aes_dec++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_dec_des_ecb(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_DES;
	rctx->dir = QCE_DECRYPT;
	rctx->mode = QCE_MODE_ECB;

	pstat->ablk_cipher_des_dec++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_dec_des_cbc(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_DES;
	rctx->dir = QCE_DECRYPT;
	rctx->mode = QCE_MODE_CBC;

	pstat->ablk_cipher_des_dec++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_dec_3des_ecb(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_3DES;
	rctx->dir = QCE_DECRYPT;
	rctx->mode = QCE_MODE_ECB;

	pstat->ablk_cipher_3des_dec++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_dec_3des_cbc(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_3DES;
	rctx->dir = QCE_DECRYPT;
	rctx->mode = QCE_MODE_CBC;

	pstat->ablk_cipher_3des_dec++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_dec_aes_xts(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_AES;
	rctx->mode = QCE_MODE_XTS;
	rctx->dir = QCE_DECRYPT;

	pstat->ablk_cipher_aes_dec++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_aead_decrypt_aes_ccm(struct aead_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	if ((ctx->authsize > 16) || (ctx->authsize < 4) || (ctx->authsize & 1))
		return -EINVAL;
	if ((ctx->auth_key_len != AES_KEYSIZE_128) &&
		(ctx->auth_key_len != AES_KEYSIZE_256))
		return -EINVAL;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(req);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_DECRYPT;
	rctx->mode = QCE_MODE_CCM;
	rctx->iv = req->iv;

	pstat->aead_sha1_aes_dec++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_aead_setauthsize(struct crypto_aead *authenc,
		unsigned int authsize)
{
	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	return 0;
}

static int _qcrypto_aead_ccm_setauthsize(struct crypto_aead *authenc,
		unsigned int authsize)
{
	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);

	switch (authsize) {
	case 4:
	case 6:
	case 8:
	case 10:
	case 12:
	case 14:
	case 16:
		break;
	default:
		return -EINVAL;
	}
	ctx->authsize = authsize;
	return 0;
}

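/*
 * authenc() keys arrive as an rtattr-encoded blob: a
 * CRYPTO_AUTHENC_KEYA_PARAM header carrying the encryption key length,
 * followed by the authentication key and then the encryption key.
 * Split the blob into ctx->auth_key and ctx->enc_key accordingly.
 */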
static int _qcrypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
		unsigned int keylen)
{
	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
	struct rtattr *rta = (struct rtattr *)key;
	struct crypto_authenc_key_param *param;

	if (!RTA_OK(rta, keylen))
		goto badkey;
	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;
	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	ctx->enc_key_len = be32_to_cpu(param->enckeylen);

	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < ctx->enc_key_len)
		goto badkey;

	ctx->auth_key_len = keylen - ctx->enc_key_len;
	if (ctx->enc_key_len >= QCRYPTO_MAX_KEY_SIZE ||
			ctx->auth_key_len >= QCRYPTO_MAX_KEY_SIZE)
		goto badkey;
	memset(ctx->auth_key, 0, QCRYPTO_MAX_KEY_SIZE);
	memcpy(ctx->enc_key, key + ctx->auth_key_len, ctx->enc_key_len);
	memcpy(ctx->auth_key, key, ctx->auth_key_len);

	return 0;
badkey:
	ctx->enc_key_len = 0;
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int _qcrypto_aead_ccm_setkey(struct crypto_aead *aead, const u8 *key,
		unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_priv *cp = ctx->cp;

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_256:
		break;
	case AES_KEYSIZE_192:
		if (cp->ce_support.aes_key_192)
			break;
	default:
		ctx->enc_key_len = 0;
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->enc_key_len = keylen;
	memcpy(ctx->enc_key, key, keylen);
	ctx->auth_key_len = keylen;
	memcpy(ctx->auth_key, key, keylen);

	return 0;
}

static int _qcrypto_aead_encrypt_aes_cbc(struct aead_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qcrypto_aead_encrypt_aes_cbc: %p\n", req);
#endif

	rctx = aead_request_ctx(req);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CBC;
	rctx->iv = req->iv;

	pstat->aead_sha1_aes_enc++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_aead_decrypt_aes_cbc(struct aead_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qcrypto_aead_decrypt_aes_cbc: %p\n", req);
#endif
	rctx = aead_request_ctx(req);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_DECRYPT;
	rctx->mode = QCE_MODE_CBC;
	rctx->iv = req->iv;

	pstat->aead_sha1_aes_dec++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_aead_givencrypt_aes_cbc(struct aead_givcrypt_request *req)
{
	struct aead_request *areq = &req->areq;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct qcrypto_cipher_req_ctx *rctx;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(areq);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CBC;
	rctx->iv = req->giv;	/* generated iv */

	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
	/* avoid consecutive packets going out with same IV */
	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);
	pstat->aead_sha1_aes_enc++;
	return _qcrypto_queue_req(cp, &areq->base);
}

#ifdef QCRYPTO_AEAD_AES_CTR
static int _qcrypto_aead_encrypt_aes_ctr(struct aead_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(req);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CTR;
	rctx->iv = req->iv;

	pstat->aead_sha1_aes_enc++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_aead_decrypt_aes_ctr(struct aead_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(req);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_AES;

	/* Note: counter mode has no separate decrypt operation */
	rctx->dir = QCE_ENCRYPT;

	rctx->mode = QCE_MODE_CTR;
	rctx->iv = req->iv;

	pstat->aead_sha1_aes_dec++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_aead_givencrypt_aes_ctr(struct aead_givcrypt_request *req)
{
	struct aead_request *areq = &req->areq;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct qcrypto_cipher_req_ctx *rctx;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(areq);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CTR;
	rctx->iv = req->giv;	/* generated iv */

	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
	/* avoid consecutive packets going out with same IV */
	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);
	pstat->aead_sha1_aes_enc++;
	return _qcrypto_queue_req(cp, &areq->base);
}
#endif /* QCRYPTO_AEAD_AES_CTR */

static int _qcrypto_aead_encrypt_des_cbc(struct aead_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(req);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_DES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CBC;
	rctx->iv = req->iv;

	pstat->aead_sha1_des_enc++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_aead_decrypt_des_cbc(struct aead_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(req);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_DES;
	rctx->dir = QCE_DECRYPT;
	rctx->mode = QCE_MODE_CBC;
	rctx->iv = req->iv;

	pstat->aead_sha1_des_dec++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_aead_givencrypt_des_cbc(struct aead_givcrypt_request *req)
{
	struct aead_request *areq = &req->areq;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct qcrypto_cipher_req_ctx *rctx;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(areq);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_DES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CBC;
	rctx->iv = req->giv;	/* generated iv */

	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
	/* avoid consecutive packets going out with same IV */
	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);
	pstat->aead_sha1_des_enc++;
	return _qcrypto_queue_req(cp, &areq->base);
}

static int _qcrypto_aead_encrypt_3des_cbc(struct aead_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(req);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_3DES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CBC;
	rctx->iv = req->iv;

	pstat->aead_sha1_3des_enc++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_aead_decrypt_3des_cbc(struct aead_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(req);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_3DES;
	rctx->dir = QCE_DECRYPT;
	rctx->mode = QCE_MODE_CBC;
	rctx->iv = req->iv;

	pstat->aead_sha1_3des_dec++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_aead_givencrypt_3des_cbc(struct aead_givcrypt_request *req)
{
	struct aead_request *areq = &req->areq;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct qcrypto_cipher_req_ctx *rctx;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(areq);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_3DES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CBC;
	rctx->iv = req->giv;	/* generated iv */

	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
	/* avoid consecutive packets going out with same IV */
	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);
	pstat->aead_sha1_3des_enc++;
	return _qcrypto_queue_req(cp, &areq->base);
}

static int qcrypto_count_sg(struct scatterlist *sg, int nbytes)
{
	int i;

	for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
		nbytes -= sg->length;

	return i;
}

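/*
 * Reset the per-transform hash bookkeeping: byte_count[] accumulates
 * the number of bytes hashed so far, and the first_blk/last_blk flags
 * tell the qce layer whether this is the initial and/or final block of
 * the stream.
 */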
1922static int _sha_init(struct qcrypto_sha_ctx *ctx)
1923{
1924 ctx->first_blk = 1;
1925 ctx->last_blk = 0;
1926 ctx->byte_count[0] = 0;
1927 ctx->byte_count[1] = 0;
1928 ctx->byte_count[2] = 0;
1929 ctx->byte_count[3] = 0;
1930 ctx->trailing_buf_len = 0;
1931
1932 return 0;
1933}
1934
1935static int _sha1_init(struct ahash_request *req)
1936{
1937 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
1938 struct crypto_priv *cp = sha_ctx->cp;
1939 struct crypto_stat *pstat;
1940
1941 pstat = &_qcrypto_stat[cp->pdev->id];
1942
1943 _sha_init(sha_ctx);
1944 sha_ctx->alg = QCE_HASH_SHA1;
1945
1946 memset(&sha_ctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE);
1947 memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
1948 SHA1_DIGEST_SIZE);
1949 sha_ctx->diglen = SHA1_DIGEST_SIZE;
1950 _update_sha1_ctx(req);
1951
1952 pstat->sha1_digest++;
1953 return 0;
1954}
1955
1956static int _sha256_init(struct ahash_request *req)
1957{
1958 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
1959 struct crypto_priv *cp = sha_ctx->cp;
1960 struct crypto_stat *pstat;
1961
1962 pstat = &_qcrypto_stat[cp->pdev->id];
1963
1964 _sha_init(sha_ctx);
1965 sha_ctx->alg = QCE_HASH_SHA256;
1966
1967 memset(&sha_ctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE);
1968 memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
1969 SHA256_DIGEST_SIZE);
1970 sha_ctx->diglen = SHA256_DIGEST_SIZE;
1971 _update_sha256_ctx(req);
1972
1973 pstat->sha256_digest++;
1974 return 0;
1975}
1976
1977
1978static int _sha1_export(struct ahash_request *req, void *out)
1979{
1980 struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
1981 struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx;
1982 struct sha1_state *out_ctx = (struct sha1_state *)out;
1983
1984 out_ctx->count = sha_state_ctx->count;
1985 memcpy(out_ctx->state, sha_state_ctx->state, sizeof(out_ctx->state));
1986 memcpy(out_ctx->buffer, sha_state_ctx->buffer, SHA1_BLOCK_SIZE);
1987
1988 return 0;
1989}
1990
1991static int _sha1_import(struct ahash_request *req, const void *in)
1992{
1993 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
1994 struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
1995 struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx;
1996 struct sha1_state *in_ctx = (struct sha1_state *)in;
1997
1998 sha_state_ctx->count = in_ctx->count;
1999 memcpy(sha_state_ctx->state, in_ctx->state, sizeof(in_ctx->state));
2000 memcpy(sha_state_ctx->buffer, in_ctx->buffer, SHA1_BLOCK_SIZE);
2001 memcpy(sha_ctx->trailing_buf, in_ctx->buffer, SHA1_BLOCK_SIZE);
2002
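	/*
	 * Rebuild the running byte count from the imported state: masking
	 * with 0xFFFFFFC0 keeps the portion already consumed in whole
	 * 64-byte (SHA1_BLOCK_SIZE) blocks, while the low six bits become
	 * the partial block carried in trailing_buf. (_sha256_import below
	 * does the same with SHA256_BLOCK_SIZE.)
	 */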
2003 sha_ctx->byte_count[0] = (uint32_t)(in_ctx->count & 0xFFFFFFC0);
2004 sha_ctx->byte_count[1] = (uint32_t)(in_ctx->count >> 32);
2005 _words_to_byte_stream(in_ctx->state, sha_ctx->digest, sha_ctx->diglen);
2006
2007 sha_ctx->trailing_buf_len = (uint32_t)(in_ctx->count &
2008 (SHA1_BLOCK_SIZE-1));
2009
2010 if (!(in_ctx->count))
2011 sha_ctx->first_blk = 1;
2012 else
2013 sha_ctx->first_blk = 0;
2014
2015 return 0;
2016}
2017static int _sha256_export(struct ahash_request *req, void *out)
2018{
2019 struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
2020 struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx;
2021 struct sha256_state *out_ctx = (struct sha256_state *)out;
2022
2023 out_ctx->count = sha_state_ctx->count;
2024 memcpy(out_ctx->state, sha_state_ctx->state, sizeof(out_ctx->state));
2025 memcpy(out_ctx->buf, sha_state_ctx->buf, SHA256_BLOCK_SIZE);
2026
2027 return 0;
2028}
2029
2030static int _sha256_import(struct ahash_request *req, const void *in)
2031{
2032 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2033 struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
2034 struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx;
2035 struct sha256_state *in_ctx = (struct sha256_state *)in;
2036
2037 sha_state_ctx->count = in_ctx->count;
2038 memcpy(sha_state_ctx->state, in_ctx->state, sizeof(in_ctx->state));
2039 memcpy(sha_state_ctx->buf, in_ctx->buf, SHA256_BLOCK_SIZE);
2040 memcpy(sha_ctx->trailing_buf, in_ctx->buf, SHA256_BLOCK_SIZE);
2041
2042 sha_ctx->byte_count[0] = (uint32_t)(in_ctx->count & 0xFFFFFFC0);
2043 sha_ctx->byte_count[1] = (uint32_t)(in_ctx->count >> 32);
2044 _words_to_byte_stream(in_ctx->state, sha_ctx->digest, sha_ctx->diglen);
2045
2046 sha_ctx->trailing_buf_len = (uint32_t)(in_ctx->count &
2047 (SHA256_BLOCK_SIZE-1));
2048
2049 if (!(in_ctx->count))
2050 sha_ctx->first_blk = 1;
2051 else
2052 sha_ctx->first_blk = 0;
2053
2054 return 0;
2055}
2056
2057
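/*
 * Common update path for SHA-1 and SHA-256: input is accumulated in
 * trailing_buf until more than one block's worth is available; only
 * whole blocks are handed to the CE hardware, and a non-empty tail
 * (a full block when the total is block-aligned) is held back for the
 * next update() or final().
 */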
2058static int _sha_update(struct ahash_request *req, uint32_t sha_block_size)
2059{
2060 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2061 struct crypto_priv *cp = sha_ctx->cp;
2062 struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
2063 uint32_t total, len, i, num_sg;
2064 uint8_t *k_src = NULL;
2065 uint32_t sha_pad_len = 0;
2066 uint32_t end_src = 0;
2067 uint32_t trailing_buf_len = 0;
2068 uint32_t nbytes, index = 0;
2069 uint32_t saved_length = 0;
2070 int ret = 0;
2071
2072 /* check for trailing buffer from previous updates and append it */
2073 total = req->nbytes + sha_ctx->trailing_buf_len;
2074 len = req->nbytes;
2075
2076 if (total <= sha_block_size) {
2077 i = 0;
2078
2079 k_src = &sha_ctx->trailing_buf[sha_ctx->trailing_buf_len];
2080 while (len > 0) {
2081 memcpy(k_src, sg_virt(&req->src[i]),
2082 req->src[i].length);
2083 len -= req->src[i].length;
2084 k_src += req->src[i].length;
2085 i++;
2086 }
2087 sha_ctx->trailing_buf_len = total;
2088 if (sha_ctx->alg == QCE_HASH_SHA1)
2089 _update_sha1_ctx(req);
2090		else if (sha_ctx->alg == QCE_HASH_SHA256)
2091 _update_sha256_ctx(req);
2092 return 0;
2093 }
2094
2095	/* save the original req structure fields */
2096 rctx->src = req->src;
2097 rctx->nbytes = req->nbytes;
2098
2099 memcpy(sha_ctx->tmp_tbuf, sha_ctx->trailing_buf,
2100 sha_ctx->trailing_buf_len);
2101 k_src = &sha_ctx->trailing_buf[0];
2102 /* get new trailing buffer */
2103 sha_pad_len = ALIGN(total, sha_block_size) - total;
2104 trailing_buf_len = sha_block_size - sha_pad_len;
2105 nbytes = total - trailing_buf_len;
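	/*
	 * e.g. total = 150 with a 64-byte block: ALIGN() gives 192, so
	 * sha_pad_len = 42, trailing_buf_len = 22 and nbytes = 128, i.e.
	 * two whole blocks go to hardware and 22 bytes are held back.
	 */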
2106 num_sg = qcrypto_count_sg(req->src, req->nbytes);
2107
2108 len = sha_ctx->trailing_buf_len;
2109 i = 0;
2110
2111 while (len < nbytes) {
2112 if ((len + req->src[i].length) > nbytes)
2113 break;
2114 len += req->src[i].length;
2115 i++;
2116 }
2117
2118 end_src = i;
2119 if (len < nbytes) {
2120 uint32_t remnant = (nbytes - len);
2121 memcpy(k_src, (sg_virt(&req->src[i]) + remnant),
2122 (req->src[i].length - remnant));
2123 k_src += (req->src[i].length - remnant);
2124 saved_length = req->src[i].length;
2125 index = i;
2126 req->src[i].length = remnant;
2127 i++;
2128 }
2129
2130 while (i < num_sg) {
2131 memcpy(k_src, sg_virt(&req->src[i]), req->src[i].length);
2132 k_src += req->src[i].length;
2133 i++;
2134 }
2135
2136 if (sha_ctx->trailing_buf_len) {
2137 num_sg = end_src + 2;
2138 sha_ctx->sg = kzalloc(num_sg * (sizeof(struct scatterlist)),
2139 GFP_KERNEL);
2140 if (sha_ctx->sg == NULL) {
2141			pr_err("qcrypto: failed to allocate sha_ctx->sg\n");
2142			/* kzalloc() returns NULL on failure, not an ERR_PTR */
2143 return -ENOMEM;
2144 }
2145
2146 sg_set_buf(&sha_ctx->sg[0], sha_ctx->tmp_tbuf,
2147 sha_ctx->trailing_buf_len);
2148 for (i = 1; i < num_sg; i++)
2149 sg_set_buf(&sha_ctx->sg[i], sg_virt(&req->src[i-1]),
2150 req->src[i-1].length);
2151
2152 req->src = sha_ctx->sg;
2153 sg_mark_end(&sha_ctx->sg[num_sg - 1]);
2154 } else
2155 sg_mark_end(&req->src[end_src]);
2156
2157 req->nbytes = nbytes;
2158 if (saved_length > 0)
2159 rctx->src[index].length = saved_length;
2160 sha_ctx->trailing_buf_len = trailing_buf_len;
2161
2162 ret = _qcrypto_queue_req(cp, &req->base);
2163
2164 return ret;
2165}
2166
2167static int _sha1_update(struct ahash_request *req)
2168{
2169 struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
2170 struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx;
2171
2172 sha_state_ctx->count += req->nbytes;
2173 return _sha_update(req, SHA1_BLOCK_SIZE);
2174}
2175
2176static int _sha256_update(struct ahash_request *req)
2177{
2178 struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
2179 struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx;
2180
2181 sha_state_ctx->count += req->nbytes;
2182 return _sha_update(req, SHA256_BLOCK_SIZE);
2183}
2184
2185static int _sha_final(struct ahash_request *req, uint32_t sha_block_size)
2186{
2187 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2188 struct crypto_priv *cp = sha_ctx->cp;
2189 struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
2190 int ret = 0;
2191
2192 sha_ctx->last_blk = 1;
2193
2194 /* save the original req structure fields*/
2195 rctx->src = req->src;
2196 rctx->nbytes = req->nbytes;
2197
2198 sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->trailing_buf,
2199 sha_ctx->trailing_buf_len);
2200 sg_mark_end(&sha_ctx->tmp_sg);
2201
2202 req->src = &sha_ctx->tmp_sg;
2203 req->nbytes = sha_ctx->trailing_buf_len;
2204
2205 ret = _qcrypto_queue_req(cp, &req->base);
2206
2207 return ret;
2208}
2209
2210static int _sha1_final(struct ahash_request *req)
2211{
2212 return _sha_final(req, SHA1_BLOCK_SIZE);
2213}
2214
2215static int _sha256_final(struct ahash_request *req)
2216{
2217 return _sha_final(req, SHA256_BLOCK_SIZE);
2218}
2219
2220static int _sha_digest(struct ahash_request *req)
2221{
2222 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2223 struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
2224 struct crypto_priv *cp = sha_ctx->cp;
2225 int ret = 0;
2226
2227 /* save the original req structure fields*/
2228 rctx->src = req->src;
2229 rctx->nbytes = req->nbytes;
2230
2231 sha_ctx->last_blk = 1;
2232 ret = _qcrypto_queue_req(cp, &req->base);
2233
2234 return ret;
2235}
2236
2237static int _sha1_digest(struct ahash_request *req)
2238{
2239 _sha1_init(req);
2240 return _sha_digest(req);
2241}
2242
2243static int _sha256_digest(struct ahash_request *req)
2244{
2245 _sha256_init(req);
2246 return _sha_digest(req);
2247}
2248
2249static void _crypto_sha_hmac_ahash_req_complete(
2250 struct crypto_async_request *req, int err)
2251{
2252 struct completion *ahash_req_complete = req->data;
2253
2254 if (err == -EINPROGRESS)
2255 return;
2256 complete(ahash_req_complete);
2257}
2258
2259static int _sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
2260 unsigned int len)
2261{
2262 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
2263 int ret = 0;
2264
2265 sha_ctx->in_buf = kzalloc(len, GFP_KERNEL);
2266 if (sha_ctx->in_buf == NULL) {
2267		pr_err("qcrypto: failed to allocate sha_ctx->in_buf\n");
2268		/* kzalloc() returns NULL on failure, not an ERR_PTR */
2269 return -ENOMEM;
2270 }
2271 memcpy(sha_ctx->in_buf, key, len);
2272 sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->in_buf, len);
2273 sg_mark_end(&sha_ctx->tmp_sg);
2274
2275 ahash_request_set_crypt(sha_ctx->ahash_req, &sha_ctx->tmp_sg,
2276 &sha_ctx->authkey[0], len);
2277
2278 ret = _sha_digest(sha_ctx->ahash_req);
2279 if (ret == -EINPROGRESS || ret == -EBUSY) {
2280 ret =
2281 wait_for_completion_interruptible(
2282 &sha_ctx->ahash_req_complete);
2283 INIT_COMPLETION(sha_ctx->ahash_req_complete);
2284 }
2285
2286 sha_ctx->authkey_in_len = len;
2287 kfree(sha_ctx->in_buf);
2288 sha_ctx->in_buf = NULL;
2289
2290 return ret;
2291}
2292
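/*
 * Per RFC 2104, an HMAC key longer than the block size is first hashed
 * down to its digest (handled by _sha_hmac_setkey() above); shorter
 * keys are copied into authkey[] as-is.
 */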
2293static int _sha1_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
2294 unsigned int len)
2295{
2296 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
2297
2298 if (len <= SHA1_BLOCK_SIZE)
2299 memcpy(&sha_ctx->authkey[0], key, len);
2300 else {
2301 _sha_init(sha_ctx);
2302 sha_ctx->alg = QCE_HASH_SHA1;
2303 memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
2304 SHA1_DIGEST_SIZE);
2305 sha_ctx->diglen = SHA1_DIGEST_SIZE;
2306		return _sha_hmac_setkey(tfm, key, len);
2307 }
2308 return 0;
2309}
2310
2311static int _sha256_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
2312 unsigned int len)
2313{
2314 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
2315
2316 if (len <= SHA256_BLOCK_SIZE)
2317 memcpy(&sha_ctx->authkey[0], key, len);
2318 else {
2319 _sha_init(sha_ctx);
2320 sha_ctx->alg = QCE_HASH_SHA256;
2321 memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
2322 SHA256_DIGEST_SIZE);
2323 sha_ctx->diglen = SHA256_DIGEST_SIZE;
2324		return _sha_hmac_setkey(tfm, key, len);
2325 }
2326
2327 return 0;
2328}
2329
2330static int _sha_hmac_init_ihash(struct ahash_request *req,
2331 uint32_t sha_block_size)
2332{
2333 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2334 int i;
2335
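	/* HMAC inner pad per RFC 2104: XOR the key with 0x36 repeated */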
2336 for (i = 0; i < sha_block_size; i++)
2337 sha_ctx->trailing_buf[i] = sha_ctx->authkey[i] ^ 0x36;
2338 sha_ctx->trailing_buf_len = sha_block_size;
2339
2340 return 0;
2341}
2342
2343static int _sha1_hmac_init(struct ahash_request *req)
2344{
2345 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2346 struct crypto_priv *cp = sha_ctx->cp;
2347 struct crypto_stat *pstat;
2348 int ret = 0;
2349
2350 pstat = &_qcrypto_stat[cp->pdev->id];
2351 pstat->sha1_hmac_digest++;
2352
2353 _sha_init(sha_ctx);
2354 memset(&sha_ctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE);
2355 memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
2356 SHA1_DIGEST_SIZE);
2357 sha_ctx->diglen = SHA1_DIGEST_SIZE;
2358 _update_sha1_ctx(req);
2359
2360 if (cp->ce_support.sha_hmac)
2361 sha_ctx->alg = QCE_HASH_SHA1_HMAC;
2362 else {
2363 sha_ctx->alg = QCE_HASH_SHA1;
2364 ret = _sha_hmac_init_ihash(req, SHA1_BLOCK_SIZE);
2365 }
2366
2367 return ret;
2368}
2369
2370static int _sha256_hmac_init(struct ahash_request *req)
2371{
2372 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2373 struct crypto_priv *cp = sha_ctx->cp;
2374 struct crypto_stat *pstat;
2375 int ret = 0;
2376
2377 pstat = &_qcrypto_stat[cp->pdev->id];
2378 pstat->sha256_hmac_digest++;
2379
2380 _sha_init(sha_ctx);
2381 memset(&sha_ctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE);
2382 memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
2383 SHA256_DIGEST_SIZE);
2384 sha_ctx->diglen = SHA256_DIGEST_SIZE;
2385 _update_sha256_ctx(req);
2386
2387 if (cp->ce_support.sha_hmac)
2388 sha_ctx->alg = QCE_HASH_SHA256_HMAC;
2389 else {
2390 sha_ctx->alg = QCE_HASH_SHA256;
2391 ret = _sha_hmac_init_ihash(req, SHA256_BLOCK_SIZE);
2392 }
2393
2394 return ret;
2395}
2396
2397static int _sha1_hmac_update(struct ahash_request *req)
2398{
2399 return _sha1_update(req);
2400}
2401
2402static int _sha256_hmac_update(struct ahash_request *req)
2403{
2404 return _sha256_update(req);
2405}
2406
2407static int _sha_hmac_outer_hash(struct ahash_request *req,
2408 uint32_t sha_digest_size, uint32_t sha_block_size)
2409{
2410 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2411 struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
2412 struct crypto_priv *cp = sha_ctx->cp;
2413 int i;
2414
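	/*
	 * HMAC outer pad per RFC 2104: XOR the key with 0x5c; the final
	 * digest is H((K ^ opad) || inner_digest).
	 */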
2415 for (i = 0; i < sha_block_size; i++)
2416 sha_ctx->tmp_tbuf[i] = sha_ctx->authkey[i] ^ 0x5c;
2417
2418 /* save the original req structure fields*/
2419 rctx->src = req->src;
2420 rctx->nbytes = req->nbytes;
2421
2422 memcpy(&sha_ctx->tmp_tbuf[sha_block_size], &sha_ctx->digest[0],
2423 sha_digest_size);
2424
2425 sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->tmp_tbuf, sha_block_size +
2426 sha_digest_size);
2427 sg_mark_end(&sha_ctx->tmp_sg);
2428 req->src = &sha_ctx->tmp_sg;
2429 req->nbytes = sha_block_size + sha_digest_size;
2430
2431 _sha_init(sha_ctx);
2432 if (sha_ctx->alg == QCE_HASH_SHA1) {
2433 memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
2434 SHA1_DIGEST_SIZE);
2435 sha_ctx->diglen = SHA1_DIGEST_SIZE;
2436 } else {
2437 memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
2438 SHA256_DIGEST_SIZE);
2439 sha_ctx->diglen = SHA256_DIGEST_SIZE;
2440 }
2441
2442 sha_ctx->last_blk = 1;
2443 return _qcrypto_queue_req(cp, &req->base);
2444}
2445
2446static int _sha_hmac_inner_hash(struct ahash_request *req,
2447 uint32_t sha_digest_size, uint32_t sha_block_size)
2448{
2449 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2450 struct ahash_request *areq = sha_ctx->ahash_req;
2451 struct crypto_priv *cp = sha_ctx->cp;
2452 int ret = 0;
2453
2454 sha_ctx->last_blk = 1;
2455
2456 sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->trailing_buf,
2457 sha_ctx->trailing_buf_len);
2458 sg_mark_end(&sha_ctx->tmp_sg);
2459
2460 ahash_request_set_crypt(areq, &sha_ctx->tmp_sg, &sha_ctx->digest[0],
2461 sha_ctx->trailing_buf_len);
2463 ret = _qcrypto_queue_req(cp, &areq->base);
2464
2465 if (ret == -EINPROGRESS || ret == -EBUSY) {
2466 ret =
2467 wait_for_completion_interruptible(&sha_ctx->ahash_req_complete);
2468 INIT_COMPLETION(sha_ctx->ahash_req_complete);
2469 }
2470
2471 return ret;
2472}
2473
2474static int _sha1_hmac_final(struct ahash_request *req)
2475{
2476 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2477 struct crypto_priv *cp = sha_ctx->cp;
2478 int ret = 0;
2479
2480 if (cp->ce_support.sha_hmac)
2481 return _sha_final(req, SHA1_BLOCK_SIZE);
2482 else {
2483 ret = _sha_hmac_inner_hash(req, SHA1_DIGEST_SIZE,
2484 SHA1_BLOCK_SIZE);
2485 if (ret)
2486 return ret;
2487 return _sha_hmac_outer_hash(req, SHA1_DIGEST_SIZE,
2488 SHA1_BLOCK_SIZE);
2489 }
2490}
2491
2492static int _sha256_hmac_final(struct ahash_request *req)
2493{
2494 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2495 struct crypto_priv *cp = sha_ctx->cp;
2496 int ret = 0;
2497
2498 if (cp->ce_support.sha_hmac)
2499 return _sha_final(req, SHA256_BLOCK_SIZE);
2500 else {
2501 ret = _sha_hmac_inner_hash(req, SHA256_DIGEST_SIZE,
2502 SHA256_BLOCK_SIZE);
2503 if (ret)
2504 return ret;
2505 return _sha_hmac_outer_hash(req, SHA256_DIGEST_SIZE,
2506 SHA256_BLOCK_SIZE);
2507 }
2509}
2510
2511
2512static int _sha1_hmac_digest(struct ahash_request *req)
2513{
2514 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2515 struct crypto_priv *cp = sha_ctx->cp;
2516 struct crypto_stat *pstat;
2517
2518 pstat = &_qcrypto_stat[cp->pdev->id];
2519 pstat->sha1_hmac_digest++;
2520
2521 _sha_init(sha_ctx);
2522 memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
2523 SHA1_DIGEST_SIZE);
2524 sha_ctx->diglen = SHA1_DIGEST_SIZE;
2525 sha_ctx->alg = QCE_HASH_SHA1_HMAC;
2526
2527 return _sha_digest(req);
2528}
2529
2530static int _sha256_hmac_digest(struct ahash_request *req)
2531{
2532 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2533 struct crypto_priv *cp = sha_ctx->cp;
2534 struct crypto_stat *pstat;
2535
2536 pstat = &_qcrypto_stat[cp->pdev->id];
2537 pstat->sha256_hmac_digest++;
2538
2539 _sha_init(sha_ctx);
2540 memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
2541 SHA256_DIGEST_SIZE);
2542 sha_ctx->diglen = SHA256_DIGEST_SIZE;
2543 sha_ctx->alg = QCE_HASH_SHA256_HMAC;
2544
2545 return _sha_digest(req);
2546}
2547
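/*
 * When several implementations register the same cra_name, the crypto
 * API selects the one with the highest cra_priority; 300 here outranks
 * the kernel's generic software transforms, so e.g. "sha1" resolves to
 * this engine once the driver is loaded.
 */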
2548static struct ahash_alg _qcrypto_ahash_algos[] = {
2549 {
2550 .init = _sha1_init,
2551 .update = _sha1_update,
2552 .final = _sha1_final,
2553 .export = _sha1_export,
2554 .import = _sha1_import,
2555 .digest = _sha1_digest,
2556 .halg = {
2557 .digestsize = SHA1_DIGEST_SIZE,
2558 .statesize = sizeof(struct sha1_state),
2559 .base = {
2560 .cra_name = "sha1",
2561 .cra_driver_name = "qcrypto-sha1",
2562 .cra_priority = 300,
2563 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2564 CRYPTO_ALG_ASYNC,
2565 .cra_blocksize = SHA1_BLOCK_SIZE,
2566 .cra_ctxsize =
2567 sizeof(struct qcrypto_sha_ctx),
2568 .cra_alignmask = 0,
2569 .cra_type = &crypto_ahash_type,
2570 .cra_module = THIS_MODULE,
2571 .cra_init = _qcrypto_ahash_cra_init,
2572 .cra_exit = _qcrypto_ahash_cra_exit,
2573 },
2574 },
2575 },
2576 {
2577 .init = _sha256_init,
2578 .update = _sha256_update,
2579 .final = _sha256_final,
2580 .export = _sha256_export,
2581 .import = _sha256_import,
2582 .digest = _sha256_digest,
2583 .halg = {
2584 .digestsize = SHA256_DIGEST_SIZE,
2585 .statesize = sizeof(struct sha256_state),
2586 .base = {
2587 .cra_name = "sha256",
2588 .cra_driver_name = "qcrypto-sha256",
2589 .cra_priority = 300,
2590 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2591 CRYPTO_ALG_ASYNC,
2592 .cra_blocksize = SHA256_BLOCK_SIZE,
2593 .cra_ctxsize =
2594 sizeof(struct qcrypto_sha_ctx),
2595 .cra_alignmask = 0,
2596 .cra_type = &crypto_ahash_type,
2597 .cra_module = THIS_MODULE,
2598 .cra_init = _qcrypto_ahash_cra_init,
2599 .cra_exit = _qcrypto_ahash_cra_exit,
2600 },
2601 },
2602 },
2603};
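
/*
 * Illustrative only, not part of the driver: a minimal sketch of how a
 * kernel client would reach these transforms through the generic ahash
 * API (sg, digest_buf, nbytes and my_complete_cb are placeholders):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);
 *	struct ahash_request *req;
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	ahash_request_set_callback(req, 0, my_complete_cb, NULL);
 *	ahash_request_set_crypt(req, sg, digest_buf, nbytes);
 *	ret = crypto_ahash_digest(req);   (may return -EINPROGRESS)
 */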
2604
2605static struct ahash_alg _qcrypto_sha_hmac_algos[] = {
2606 {
2607 .init = _sha1_hmac_init,
2608 .update = _sha1_hmac_update,
2609 .final = _sha1_hmac_final,
2610 .export = _sha1_export,
2611 .import = _sha1_import,
2612 .digest = _sha1_hmac_digest,
2613 .setkey = _sha1_hmac_setkey,
2614 .halg = {
2615 .digestsize = SHA1_DIGEST_SIZE,
2616 .statesize = sizeof(struct sha1_state),
2617 .base = {
2618 .cra_name = "hmac(sha1)",
2619 .cra_driver_name = "qcrypto-hmac-sha1",
2620 .cra_priority = 300,
2621 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2622 CRYPTO_ALG_ASYNC,
2623 .cra_blocksize = SHA1_BLOCK_SIZE,
2624 .cra_ctxsize =
2625 sizeof(struct qcrypto_sha_ctx),
2626 .cra_alignmask = 0,
2627 .cra_type = &crypto_ahash_type,
2628 .cra_module = THIS_MODULE,
2629 .cra_init = _qcrypto_ahash_hmac_cra_init,
2630 .cra_exit = _qcrypto_ahash_cra_exit,
2631 },
2632 },
2633 },
2634 {
2635 .init = _sha256_hmac_init,
2636 .update = _sha256_hmac_update,
2637 .final = _sha256_hmac_final,
2638 .export = _sha256_export,
2639 .import = _sha256_import,
2640 .digest = _sha256_hmac_digest,
2641 .setkey = _sha256_hmac_setkey,
2642 .halg = {
2643 .digestsize = SHA256_DIGEST_SIZE,
2644 .statesize = sizeof(struct sha256_state),
2645 .base = {
2646 .cra_name = "hmac(sha256)",
2647 .cra_driver_name = "qcrypto-hmac-sha256",
2648 .cra_priority = 300,
2649 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2650 CRYPTO_ALG_ASYNC,
2651 .cra_blocksize = SHA256_BLOCK_SIZE,
2652 .cra_ctxsize =
2653 sizeof(struct qcrypto_sha_ctx),
2654 .cra_alignmask = 0,
2655 .cra_type = &crypto_ahash_type,
2656 .cra_module = THIS_MODULE,
2657 .cra_init = _qcrypto_ahash_hmac_cra_init,
2658 .cra_exit = _qcrypto_ahash_cra_exit,
2659 },
2660 },
2661 },
2662};
2663
2664static struct crypto_alg _qcrypto_ablk_cipher_algos[] = {
2665 {
2666 .cra_name = "ecb(aes)",
2667 .cra_driver_name = "qcrypto-ecb-aes",
2668 .cra_priority = 300,
2669 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
2670 .cra_blocksize = AES_BLOCK_SIZE,
2671 .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
2672 .cra_alignmask = 0,
2673 .cra_type = &crypto_ablkcipher_type,
2674 .cra_module = THIS_MODULE,
2675 .cra_init = _qcrypto_cra_ablkcipher_init,
2676 .cra_u = {
2677 .ablkcipher = {
2678 .min_keysize = AES_MIN_KEY_SIZE,
2679 .max_keysize = AES_MAX_KEY_SIZE,
2680 .setkey = _qcrypto_setkey_aes,
2681 .encrypt = _qcrypto_enc_aes_ecb,
2682 .decrypt = _qcrypto_dec_aes_ecb,
2683 },
2684 },
2685 },
2686 {
2687 .cra_name = "cbc(aes)",
2688 .cra_driver_name = "qcrypto-cbc-aes",
2689 .cra_priority = 300,
2690 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
2691 .cra_blocksize = AES_BLOCK_SIZE,
2692 .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
2693 .cra_alignmask = 0,
2694 .cra_type = &crypto_ablkcipher_type,
2695 .cra_module = THIS_MODULE,
2696 .cra_init = _qcrypto_cra_ablkcipher_init,
2697 .cra_u = {
2698 .ablkcipher = {
2699 .ivsize = AES_BLOCK_SIZE,
2700 .min_keysize = AES_MIN_KEY_SIZE,
2701 .max_keysize = AES_MAX_KEY_SIZE,
2702 .setkey = _qcrypto_setkey_aes,
2703 .encrypt = _qcrypto_enc_aes_cbc,
2704 .decrypt = _qcrypto_dec_aes_cbc,
2705 },
2706 },
2707 },
2708 {
2709 .cra_name = "ctr(aes)",
2710 .cra_driver_name = "qcrypto-ctr-aes",
2711 .cra_priority = 300,
2712 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
2713 .cra_blocksize = AES_BLOCK_SIZE,
2714 .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
2715 .cra_alignmask = 0,
2716 .cra_type = &crypto_ablkcipher_type,
2717 .cra_module = THIS_MODULE,
2718 .cra_init = _qcrypto_cra_ablkcipher_init,
2719 .cra_u = {
2720 .ablkcipher = {
2721 .ivsize = AES_BLOCK_SIZE,
2722 .min_keysize = AES_MIN_KEY_SIZE,
2723 .max_keysize = AES_MAX_KEY_SIZE,
2724 .setkey = _qcrypto_setkey_aes,
2725 .encrypt = _qcrypto_enc_aes_ctr,
2726 .decrypt = _qcrypto_dec_aes_ctr,
2727 },
2728 },
2729 },
2730 {
2731 .cra_name = "ecb(des)",
2732 .cra_driver_name = "qcrypto-ecb-des",
2733 .cra_priority = 300,
2734 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
2735 .cra_blocksize = DES_BLOCK_SIZE,
2736 .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
2737 .cra_alignmask = 0,
2738 .cra_type = &crypto_ablkcipher_type,
2739 .cra_module = THIS_MODULE,
2740 .cra_init = _qcrypto_cra_ablkcipher_init,
2741 .cra_u = {
2742 .ablkcipher = {
2743 .min_keysize = DES_KEY_SIZE,
2744 .max_keysize = DES_KEY_SIZE,
2745 .setkey = _qcrypto_setkey_des,
2746 .encrypt = _qcrypto_enc_des_ecb,
2747 .decrypt = _qcrypto_dec_des_ecb,
2748 },
2749 },
2750 },
2751 {
2752 .cra_name = "cbc(des)",
2753 .cra_driver_name = "qcrypto-cbc-des",
2754 .cra_priority = 300,
2755 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
2756 .cra_blocksize = DES_BLOCK_SIZE,
2757 .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
2758 .cra_alignmask = 0,
2759 .cra_type = &crypto_ablkcipher_type,
2760 .cra_module = THIS_MODULE,
2761 .cra_init = _qcrypto_cra_ablkcipher_init,
2762 .cra_u = {
2763 .ablkcipher = {
2764 .ivsize = DES_BLOCK_SIZE,
2765 .min_keysize = DES_KEY_SIZE,
2766 .max_keysize = DES_KEY_SIZE,
2767 .setkey = _qcrypto_setkey_des,
2768 .encrypt = _qcrypto_enc_des_cbc,
2769 .decrypt = _qcrypto_dec_des_cbc,
2770 },
2771 },
2772 },
2773 {
2774 .cra_name = "ecb(des3_ede)",
2775 .cra_driver_name = "qcrypto-ecb-3des",
2776 .cra_priority = 300,
2777 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
2778 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2779 .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
2780 .cra_alignmask = 0,
2781 .cra_type = &crypto_ablkcipher_type,
2782 .cra_module = THIS_MODULE,
2783 .cra_init = _qcrypto_cra_ablkcipher_init,
2784 .cra_u = {
2785 .ablkcipher = {
2786 .min_keysize = DES3_EDE_KEY_SIZE,
2787 .max_keysize = DES3_EDE_KEY_SIZE,
2788 .setkey = _qcrypto_setkey_3des,
2789 .encrypt = _qcrypto_enc_3des_ecb,
2790 .decrypt = _qcrypto_dec_3des_ecb,
2791 },
2792 },
2793 },
2794 {
2795 .cra_name = "cbc(des3_ede)",
2796 .cra_driver_name = "qcrypto-cbc-3des",
2797 .cra_priority = 300,
2798 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
2799 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2800 .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
2801 .cra_alignmask = 0,
2802 .cra_type = &crypto_ablkcipher_type,
2803 .cra_module = THIS_MODULE,
2804 .cra_init = _qcrypto_cra_ablkcipher_init,
2805 .cra_u = {
2806 .ablkcipher = {
2807 .ivsize = DES3_EDE_BLOCK_SIZE,
2808 .min_keysize = DES3_EDE_KEY_SIZE,
2809 .max_keysize = DES3_EDE_KEY_SIZE,
2810 .setkey = _qcrypto_setkey_3des,
2811 .encrypt = _qcrypto_enc_3des_cbc,
2812 .decrypt = _qcrypto_dec_3des_cbc,
2813 },
2814 },
2815 },
2816};
2817
2818static struct crypto_alg _qcrypto_ablk_cipher_xts_algo = {
2819 .cra_name = "xts(aes)",
2820 .cra_driver_name = "qcrypto-xts-aes",
2821 .cra_priority = 300,
2822 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
2823 .cra_blocksize = AES_BLOCK_SIZE,
2824 .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
2825 .cra_alignmask = 0,
2826 .cra_type = &crypto_ablkcipher_type,
2827 .cra_module = THIS_MODULE,
2828 .cra_init = _qcrypto_cra_ablkcipher_init,
2829 .cra_u = {
2830 .ablkcipher = {
2831 .ivsize = AES_BLOCK_SIZE,
2832 .min_keysize = AES_MIN_KEY_SIZE,
2833 .max_keysize = AES_MAX_KEY_SIZE,
2834 .setkey = _qcrypto_setkey_aes,
2835 .encrypt = _qcrypto_enc_aes_xts,
2836 .decrypt = _qcrypto_dec_aes_xts,
2837 },
2838 },
2839};
2840
2841static struct crypto_alg _qcrypto_aead_sha1_hmac_algos[] = {
2842 {
2843 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2844 .cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-aes",
2845 .cra_priority = 300,
2846 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2847 .cra_blocksize = AES_BLOCK_SIZE,
2848 .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
2849 .cra_alignmask = 0,
2850 .cra_type = &crypto_aead_type,
2851 .cra_module = THIS_MODULE,
2852 .cra_init = _qcrypto_cra_aead_init,
2853 .cra_u = {
2854 .aead = {
2855 .ivsize = AES_BLOCK_SIZE,
2856 .maxauthsize = SHA1_DIGEST_SIZE,
2857 .setkey = _qcrypto_aead_setkey,
2858 .setauthsize = _qcrypto_aead_setauthsize,
2859 .encrypt = _qcrypto_aead_encrypt_aes_cbc,
2860 .decrypt = _qcrypto_aead_decrypt_aes_cbc,
2861 .givencrypt = _qcrypto_aead_givencrypt_aes_cbc,
2862 .geniv = "<built-in>",
2863 }
2864 }
2865 },
2866
2867#ifdef QCRYPTO_AEAD_AES_CTR
2868 {
2869 .cra_name = "authenc(hmac(sha1),ctr(aes))",
2870 .cra_driver_name = "qcrypto-aead-hmac-sha1-ctr-aes",
2871 .cra_priority = 300,
2872 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2873 .cra_blocksize = AES_BLOCK_SIZE,
2874 .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
2875 .cra_alignmask = 0,
2876 .cra_type = &crypto_aead_type,
2877 .cra_module = THIS_MODULE,
2878 .cra_init = _qcrypto_cra_aead_init,
2879 .cra_u = {
2880 .aead = {
2881 .ivsize = AES_BLOCK_SIZE,
2882 .maxauthsize = SHA1_DIGEST_SIZE,
2883 .setkey = _qcrypto_aead_setkey,
2884 .setauthsize = _qcrypto_aead_setauthsize,
2885 .encrypt = _qcrypto_aead_encrypt_aes_ctr,
2886 .decrypt = _qcrypto_aead_decrypt_aes_ctr,
2887 .givencrypt = _qcrypto_aead_givencrypt_aes_ctr,
2888 .geniv = "<built-in>",
2889 }
2890 }
2891 },
2892#endif /* QCRYPTO_AEAD_AES_CTR */
2893 {
2894 .cra_name = "authenc(hmac(sha1),cbc(des))",
2895 .cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-des",
2896 .cra_priority = 300,
2897 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2898 .cra_blocksize = DES_BLOCK_SIZE,
2899 .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
2900 .cra_alignmask = 0,
2901 .cra_type = &crypto_aead_type,
2902 .cra_module = THIS_MODULE,
2903 .cra_init = _qcrypto_cra_aead_init,
2904 .cra_u = {
2905 .aead = {
2906 .ivsize = DES_BLOCK_SIZE,
2907 .maxauthsize = SHA1_DIGEST_SIZE,
2908 .setkey = _qcrypto_aead_setkey,
2909 .setauthsize = _qcrypto_aead_setauthsize,
2910 .encrypt = _qcrypto_aead_encrypt_des_cbc,
2911 .decrypt = _qcrypto_aead_decrypt_des_cbc,
2912 .givencrypt = _qcrypto_aead_givencrypt_des_cbc,
2913 .geniv = "<built-in>",
2914 }
2915 }
2916 },
2917 {
2918 .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
2919 .cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-3des",
2920 .cra_priority = 300,
2921 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2922 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2923 .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
2924 .cra_alignmask = 0,
2925 .cra_type = &crypto_aead_type,
2926 .cra_module = THIS_MODULE,
2927 .cra_init = _qcrypto_cra_aead_init,
2928 .cra_u = {
2929 .aead = {
2930 .ivsize = DES3_EDE_BLOCK_SIZE,
2931 .maxauthsize = SHA1_DIGEST_SIZE,
2932 .setkey = _qcrypto_aead_setkey,
2933 .setauthsize = _qcrypto_aead_setauthsize,
2934 .encrypt = _qcrypto_aead_encrypt_3des_cbc,
2935 .decrypt = _qcrypto_aead_decrypt_3des_cbc,
2936 .givencrypt = _qcrypto_aead_givencrypt_3des_cbc,
2937 .geniv = "<built-in>",
2938 }
2939 }
2940 },
2941};
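
/*
 * The "<built-in>" geniv marks these transforms as supplying their own
 * IV generation through the givencrypt entry points above, so the API
 * does not layer a separate IV generator (e.g. seqiv) on top of them.
 */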
2942
2943static struct crypto_alg _qcrypto_aead_ccm_algo = {
2944 .cra_name = "ccm(aes)",
2945 .cra_driver_name = "qcrypto-aes-ccm",
2946 .cra_priority = 300,
2947 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2948 .cra_blocksize = AES_BLOCK_SIZE,
2949 .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
2950 .cra_alignmask = 0,
2951 .cra_type = &crypto_aead_type,
2952 .cra_module = THIS_MODULE,
2953 .cra_init = _qcrypto_cra_aead_init,
2954 .cra_u = {
2955 .aead = {
2956 .ivsize = AES_BLOCK_SIZE,
2957			.maxauthsize = AES_BLOCK_SIZE, /* CCM tag is at most one AES block */
2958 .setkey = _qcrypto_aead_ccm_setkey,
2959 .setauthsize = _qcrypto_aead_ccm_setauthsize,
2960 .encrypt = _qcrypto_aead_encrypt_aes_ccm,
2961 .decrypt = _qcrypto_aead_decrypt_aes_ccm,
2962 .geniv = "<built-in>",
2963 }
2964 }
2965};
2966
2967
2968static int _qcrypto_probe(struct platform_device *pdev)
2969{
2970 int rc = 0;
2971 void *handle;
2972 struct crypto_priv *cp;
2973 int i;
2974 struct msm_ce_hw_support *platform_support;
2975
2976 if (pdev->id >= MAX_CRYPTO_DEVICE) {
2977		pr_err("%s: device id %d exceeds allowed %d\n",
2978			__func__, pdev->id, MAX_CRYPTO_DEVICE);
2979 return -ENOENT;
2980 }
2981
2982 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2983 if (!cp) {
2984		pr_err("qcrypto: memory allocation for crypto_priv failed\n");
2985		/* kzalloc() returns NULL on failure, not an ERR_PTR */
2986 return -ENOMEM;
2987 }
2988
2989 /* open qce */
2990 handle = qce_open(pdev, &rc);
2991 if (handle == NULL) {
2992 kfree(cp);
2993 platform_set_drvdata(pdev, NULL);
2994 return rc;
2995 }
2996
2997 INIT_LIST_HEAD(&cp->alg_list);
2998 platform_set_drvdata(pdev, cp);
2999 spin_lock_init(&cp->lock);
3000 tasklet_init(&cp->done_tasklet, req_done, (unsigned long)cp);
3001 crypto_init_queue(&cp->queue, 50);
3002 cp->qce = handle;
3003 cp->pdev = pdev;
3004 qce_hw_support(cp->qce, &cp->ce_support);
3005 platform_support = (struct msm_ce_hw_support *)pdev->dev.platform_data;
3006 cp->platform_support.ce_shared = platform_support->ce_shared;
3007 cp->platform_support.shared_ce_resource =
3008 platform_support->shared_ce_resource;
3009 cp->platform_support.hw_key_support =
3010 platform_support->hw_key_support;
3011 cp->ce_lock_count = 0;
3012 cp->platform_support.sha_hmac = platform_support->sha_hmac;
3013
3014 if (cp->platform_support.ce_shared)
3015 INIT_WORK(&cp->unlock_ce_ws, qcrypto_unlock_ce);
3016
3017 /* register crypto cipher algorithms the device supports */
3018 for (i = 0; i < ARRAY_SIZE(_qcrypto_ablk_cipher_algos); i++) {
3019 struct qcrypto_alg *q_alg;
3020
3021 q_alg = _qcrypto_cipher_alg_alloc(cp,
3022 &_qcrypto_ablk_cipher_algos[i]);
3023 if (IS_ERR(q_alg)) {
3024 rc = PTR_ERR(q_alg);
3025 goto err;
3026 }
3027 rc = crypto_register_alg(&q_alg->cipher_alg);
3028 if (rc) {
3029 dev_err(&pdev->dev, "%s alg registration failed\n",
3030 q_alg->cipher_alg.cra_driver_name);
3031 kfree(q_alg);
3032 } else {
3033 list_add_tail(&q_alg->entry, &cp->alg_list);
3034 dev_info(&pdev->dev, "%s\n",
3035 q_alg->cipher_alg.cra_driver_name);
3036 }
3037 }
3038
3039 /* register crypto cipher algorithms the device supports */
3040 if (cp->ce_support.aes_xts) {
3041 struct qcrypto_alg *q_alg;
3042
3043 q_alg = _qcrypto_cipher_alg_alloc(cp,
3044 &_qcrypto_ablk_cipher_xts_algo);
3045 if (IS_ERR(q_alg)) {
3046 rc = PTR_ERR(q_alg);
3047 goto err;
3048 }
3049 rc = crypto_register_alg(&q_alg->cipher_alg);
3050 if (rc) {
3051 dev_err(&pdev->dev, "%s alg registration failed\n",
3052 q_alg->cipher_alg.cra_driver_name);
3053 kfree(q_alg);
3054 } else {
3055 list_add_tail(&q_alg->entry, &cp->alg_list);
3056 dev_info(&pdev->dev, "%s\n",
3057 q_alg->cipher_alg.cra_driver_name);
3058 }
3059 }
3060
3061 /*
3062 * Register crypto hash (sha1 and sha256) algorithms the
3063 * device supports
3064 */
3065 for (i = 0; i < ARRAY_SIZE(_qcrypto_ahash_algos); i++) {
3066 struct qcrypto_alg *q_alg = NULL;
3067
3068 q_alg = _qcrypto_sha_alg_alloc(cp, &_qcrypto_ahash_algos[i]);
3069
3070 if (IS_ERR(q_alg)) {
3071 rc = PTR_ERR(q_alg);
3072 goto err;
3073 }
3074
3075 rc = crypto_register_ahash(&q_alg->sha_alg);
3076 if (rc) {
3077 dev_err(&pdev->dev, "%s alg registration failed\n",
3078 q_alg->sha_alg.halg.base.cra_driver_name);
3079 kfree(q_alg);
3080 } else {
3081 list_add_tail(&q_alg->entry, &cp->alg_list);
3082 dev_info(&pdev->dev, "%s\n",
3083 q_alg->sha_alg.halg.base.cra_driver_name);
3084 }
3085 }
3086
3087 /* register crypto aead (hmac-sha1) algorithms the device supports */
3088 if (cp->ce_support.sha1_hmac_20 || cp->ce_support.sha1_hmac) {
3089 for (i = 0; i < ARRAY_SIZE(_qcrypto_aead_sha1_hmac_algos);
3090 i++) {
3091 struct qcrypto_alg *q_alg;
3092
3093 q_alg = _qcrypto_cipher_alg_alloc(cp,
3094 &_qcrypto_aead_sha1_hmac_algos[i]);
3095 if (IS_ERR(q_alg)) {
3096 rc = PTR_ERR(q_alg);
3097 goto err;
3098 }
3099
3100 rc = crypto_register_alg(&q_alg->cipher_alg);
3101 if (rc) {
3102 dev_err(&pdev->dev,
3103 "%s alg registration failed\n",
3104 q_alg->cipher_alg.cra_driver_name);
3105 kfree(q_alg);
3106 } else {
3107 list_add_tail(&q_alg->entry, &cp->alg_list);
3108 dev_info(&pdev->dev, "%s\n",
3109 q_alg->cipher_alg.cra_driver_name);
3110 }
3111 }
3112 }
3113
3114 if ((cp->ce_support.sha_hmac) || (cp->platform_support.sha_hmac)) {
3115 /* register crypto hmac algorithms the device supports */
3116 for (i = 0; i < ARRAY_SIZE(_qcrypto_sha_hmac_algos); i++) {
3117 struct qcrypto_alg *q_alg = NULL;
3118
3119 q_alg = _qcrypto_sha_alg_alloc(cp,
3120 &_qcrypto_sha_hmac_algos[i]);
3121
3122 if (IS_ERR(q_alg)) {
3123 rc = PTR_ERR(q_alg);
3124 goto err;
3125 }
3126
3127 rc = crypto_register_ahash(&q_alg->sha_alg);
3128 if (rc) {
3129 dev_err(&pdev->dev,
3130 "%s alg registration failed\n",
3131 q_alg->sha_alg.halg.base.cra_driver_name);
3132 kfree(q_alg);
3133 } else {
3134 list_add_tail(&q_alg->entry, &cp->alg_list);
3135 dev_info(&pdev->dev, "%s\n",
3136 q_alg->sha_alg.halg.base.cra_driver_name);
3137 }
3138 }
3139 }
3140 /*
3141 * Register crypto cipher (aes-ccm) algorithms the
3142 * device supports
3143 */
3144 if (cp->ce_support.aes_ccm) {
3145 struct qcrypto_alg *q_alg;
3146
3147 q_alg = _qcrypto_cipher_alg_alloc(cp, &_qcrypto_aead_ccm_algo);
3148 if (IS_ERR(q_alg)) {
3149 rc = PTR_ERR(q_alg);
3150 goto err;
3151 }
3152 rc = crypto_register_alg(&q_alg->cipher_alg);
3153 if (rc) {
3154 dev_err(&pdev->dev, "%s alg registration failed\n",
3155 q_alg->cipher_alg.cra_driver_name);
3156 kfree(q_alg);
3157 } else {
3158 list_add_tail(&q_alg->entry, &cp->alg_list);
3159 dev_info(&pdev->dev, "%s\n",
3160 q_alg->cipher_alg.cra_driver_name);
3161 }
3162 }
3163
3164 return 0;
3165err:
3166 _qcrypto_remove(pdev);
3167 return rc;
3168}
3169
3170static struct platform_driver _qualcomm_crypto = {
3171 .probe = _qcrypto_probe,
3172 .remove = _qcrypto_remove,
3173 .driver = {
3174 .owner = THIS_MODULE,
3175 .name = "qcrypto",
3176 },
3177};
3178
3179static int _debug_qcrypto[MAX_CRYPTO_DEVICE];
3180
3181static int _debug_stats_open(struct inode *inode, struct file *file)
3182{
3183 file->private_data = inode->i_private;
3184 return 0;
3185}
3186
3187static ssize_t _debug_stats_read(struct file *file, char __user *buf,
3188 size_t count, loff_t *ppos)
3189{
3190 int rc = -EINVAL;
3191 int qcrypto = *((int *) file->private_data);
3192 int len;
3193
3194 len = _disp_stats(qcrypto);
3195
3196	rc = simple_read_from_buffer((void __user *) buf, count,
3197			ppos, (void *) _debug_read_buf, len);
3198
3199 return rc;
3200}
3201
3202static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
3203 size_t count, loff_t *ppos)
3204{
3205
3206 int qcrypto = *((int *) file->private_data);
3207
3208 memset((char *)&_qcrypto_stat[qcrypto], 0, sizeof(struct crypto_stat));
3209 return count;
3210}
3211
3212static const struct file_operations _debug_stats_ops = {
3213 .open = _debug_stats_open,
3214 .read = _debug_stats_read,
3215 .write = _debug_stats_write,
3216};
3217
3218static int _qcrypto_debug_init(void)
3219{
3220 int rc;
3221 char name[DEBUG_MAX_FNAME];
3222 int i;
3223 struct dentry *dent;
3224
3225 _debug_dent = debugfs_create_dir("qcrypto", NULL);
3226 if (IS_ERR(_debug_dent)) {
3227		pr_err("qcrypto debugfs_create_dir failed, error %ld\n",
3228 PTR_ERR(_debug_dent));
3229 return PTR_ERR(_debug_dent);
3230 }
3231
3232 for (i = 0; i < MAX_CRYPTO_DEVICE; i++) {
3233 snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", i+1);
3234 _debug_qcrypto[i] = i;
3235 dent = debugfs_create_file(name, 0644, _debug_dent,
3236 &_debug_qcrypto[i], &_debug_stats_ops);
3237 if (dent == NULL) {
3238			pr_err("qcrypto debugfs_create_file %s failed\n",
3239				name);
3240			rc = -ENOMEM;	/* PTR_ERR(NULL) would be 0 */
3241 goto err;
3242 }
3243 }
3244 return 0;
3245err:
3246 debugfs_remove_recursive(_debug_dent);
3247 return rc;
3248}
3249
3250static int __init _qcrypto_init(void)
3251{
3252 int rc;
3253
3254 rc = _qcrypto_debug_init();
3255 if (rc)
3256 return rc;
3257
3258 return platform_driver_register(&_qualcomm_crypto);
3259}
3260
3261static void __exit _qcrypto_exit(void)
3262{
3263	pr_debug("%s Unregister QCRYPTO\n", __func__);
3264	debugfs_remove_recursive(_debug_dent);
3265 platform_driver_unregister(&_qualcomm_crypto);
3266}
3267
3268module_init(_qcrypto_init);
3269module_exit(_qcrypto_exit);
3270
3271MODULE_LICENSE("GPL v2");
3272MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
3273MODULE_DESCRIPTION("Qualcomm Crypto driver");
3274MODULE_VERSION("1.19");