blob: 67301877c8cf60605691713be802c948d330ebb9 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Qualcomm Crypto driver
2 *
3 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 and
7 * only version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <linux/clk.h>
16#include <linux/types.h>
17#include <linux/platform_device.h>
18#include <linux/dma-mapping.h>
19#include <linux/dmapool.h>
20#include <linux/crypto.h>
21#include <linux/kernel.h>
22#include <linux/rtnetlink.h>
23#include <linux/interrupt.h>
24#include <linux/spinlock.h>
25#include <linux/debugfs.h>
26
27#include <crypto/ctr.h>
28#include <crypto/des.h>
29#include <crypto/aes.h>
30#include <crypto/sha.h>
31#include <crypto/hash.h>
32#include <crypto/algapi.h>
33#include <crypto/aead.h>
34#include <crypto/authenc.h>
35#include <crypto/scatterwalk.h>
36#include <crypto/internal/hash.h>
37
38#include <mach/scm.h>
39#include <linux/platform_data/qcom_crypto_device.h>
Mona Hossain5c8ea1f2011-07-28 15:11:29 -070040#include "qce.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070041
42
/* Up to this many engine instances (platform devices) are supported */
#define MAX_CRYPTO_DEVICE 3
/* debugfs: filename length and size of the shared read/format buffer */
#define DEBUG_MAX_FNAME  16
#define DEBUG_MAX_RW_BUF 1024
46
/* Per-device counters exposed through debugfs; see _disp_stats(). */
struct crypto_stat {
	/* AEAD requests, per auth/cipher combination and direction */
	u32 aead_sha1_aes_enc;
	u32 aead_sha1_aes_dec;
	u32 aead_sha1_des_enc;
	u32 aead_sha1_des_dec;
	u32 aead_sha1_3des_enc;
	u32 aead_sha1_3des_dec;
	/* AEAD completion outcomes */
	u32 aead_op_success;
	u32 aead_op_fail;
	/* ablkcipher requests, per cipher and direction */
	u32 ablk_cipher_aes_enc;
	u32 ablk_cipher_aes_dec;
	u32 ablk_cipher_des_enc;
	u32 ablk_cipher_des_dec;
	u32 ablk_cipher_3des_enc;
	u32 ablk_cipher_3des_dec;
	/* ablkcipher completion outcomes */
	u32 ablk_cipher_op_success;
	u32 ablk_cipher_op_fail;
	/* plain hash requests and completion outcomes */
	u32 sha1_digest;
	u32 sha256_digest;
	u32 sha_op_success;
	u32 sha_op_fail;
	/* HMAC requests and completion outcomes */
	u32 sha1_hmac_digest;
	u32 sha256_hmac_digest;
	u32 sha_hmac_op_success;
	u32 sha_hmac_op_fail;
};
/* One statistics slot per device, indexed by pdev->id */
static struct crypto_stat _qcrypto_stat[MAX_CRYPTO_DEVICE];
/* debugfs directory handle and shared formatting buffer for reads */
static struct dentry *_debug_dent;
static char _debug_read_buf[DEBUG_MAX_RW_BUF];
76
/* Per-probed-device driver state. */
struct crypto_priv {
	/* CE features supported by target device*/
	struct msm_ce_hw_support platform_support;

	/* CE features/algorithms supported by HW engine*/
	struct ce_hw_support ce_support;
	/* the lock protects queue and req*/
	spinlock_t lock;

	/* qce handle */
	void *qce;

	/* list of registered algorithms */
	struct list_head alg_list;

	/* platform device */
	struct platform_device *pdev;

	/* current active request */
	struct crypto_async_request *req;
	/* completion status handed to req->complete() by the done tasklet */
	int res;

	/* request queue */
	struct crypto_queue queue;

	/* nesting depth of the TZ shared-CE lock; see qcrypto_lock_ce() */
	uint32_t ce_lock_count;

	/* worker that returns the shared CE to TZ after completion */
	struct work_struct unlock_ce_ws;

	/* completes the active request outside the QCE callback context */
	struct tasklet_struct done_tasklet;
};
108
109
110/*-------------------------------------------------------------------------
111* Resource Locking Service
112* ------------------------------------------------------------------------*/
/* SCM command IDs used to arbitrate the crypto engine shared with TZ */
#define QCRYPTO_CMD_ID 1
#define QCRYPTO_CE_LOCK_CMD 1
#define QCRYPTO_CE_UNLOCK_CMD 0
/* lock-acquire retry budget and the TZ "engine busy" status value */
#define NUM_RETRY 1000
#define CE_BUSY 55
118
/*
 * Issue the lock/unlock SCM call into TZ for the shared CE resource.
 * Returns scm_call()'s status; the TZ-side status of the command is
 * written to *response.  Compiles to an unconditional success when
 * CONFIG_MSM_SCM is not enabled.
 */
static int qcrypto_scm_cmd(int resource, int cmd, int *response)
{
#ifdef CONFIG_MSM_SCM

	struct {
		int resource;
		int cmd;
	} cmd_buf;

	cmd_buf.resource = resource;
	cmd_buf.cmd = cmd;

	return scm_call(SCM_SVC_TZ, QCRYPTO_CMD_ID, &cmd_buf,
		sizeof(cmd_buf), response, sizeof(*response));

#else
	return 0;
#endif
}
138
/*
 * Work handler: drop one reference on the shared CE; when this is the
 * last reference, first tell TZ to release the engine.
 */
static void qcrypto_unlock_ce(struct work_struct *work)
{
	int response = 0;
	unsigned long flags;
	struct crypto_priv *cp = container_of(work, struct crypto_priv,
							unlock_ce_ws);
	/* NOTE(review): ce_lock_count is read here without cp->lock while
	 * the decrement below is taken under it -- looks racy if lock and
	 * unlock can run concurrently; confirm the usage is serialized. */
	if (cp->ce_lock_count == 1)
		BUG_ON(qcrypto_scm_cmd(cp->platform_support.shared_ce_resource,
				QCRYPTO_CE_UNLOCK_CMD, &response) != 0);
	spin_lock_irqsave(&cp->lock, flags);
	cp->ce_lock_count--;
	spin_unlock_irqrestore(&cp->lock, flags);
}
152
/*
 * Acquire the CE shared with TZ.  The first caller issues the SCM lock
 * command, retrying up to NUM_RETRY times while TZ reports -CE_BUSY;
 * nested callers only bump the reference count.
 *
 * Returns 0 on success, -EUSERS if the engine stayed busy through all
 * retries, -EINVAL on any other SCM failure.
 */
static int qcrypto_lock_ce(struct crypto_priv *cp)
{
	unsigned long flags;
	int response = -CE_BUSY;
	int i = 0;

	/* NOTE(review): unlocked read of ce_lock_count -- see the matching
	 * note in qcrypto_unlock_ce(). */
	if (cp->ce_lock_count == 0) {
		do {
			if (qcrypto_scm_cmd(
				cp->platform_support.shared_ce_resource,
				QCRYPTO_CE_LOCK_CMD, &response)) {
				response = -EINVAL;
				break;
			}
		} while ((response == -CE_BUSY) && (i++ < NUM_RETRY));

		if ((response == -CE_BUSY) && (i >= NUM_RETRY))
			return -EUSERS;
		if (response < 0)
			return -EINVAL;
	}
	spin_lock_irqsave(&cp->lock, flags);
	cp->ce_lock_count++;
	spin_unlock_irqrestore(&cp->lock, flags);


	return 0;
}
181
/* Selects which member of struct qcrypto_alg holds the registration */
enum qcrypto_alg_type {
	QCRYPTO_ALG_CIPHER	= 0,
	QCRYPTO_ALG_SHA	= 1,
	QCRYPTO_ALG_LAST
};
187
/* One registered algorithm: either a cipher/aead or an ahash. */
struct qcrypto_alg {
	struct list_head entry;		/* link on crypto_priv.alg_list */
	struct crypto_alg cipher_alg;	/* used when alg_type == CIPHER */
	struct ahash_alg sha_alg;	/* used when alg_type == SHA */
	enum qcrypto_alg_type alg_type;
	struct crypto_priv *cp;		/* owning device instance */
};
195
#define QCRYPTO_MAX_KEY_SIZE	64
/* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define QCRYPTO_MAX_IV_LENGTH	16

/* Per-transform state shared by the cipher and AEAD algorithms. */
struct qcrypto_cipher_ctx {
	u8 auth_key[QCRYPTO_MAX_KEY_SIZE];	/* HMAC key (AEAD only) */
	u8 iv[QCRYPTO_MAX_IV_LENGTH];		/* last IV used/produced */

	u8 enc_key[QCRYPTO_MAX_KEY_SIZE];	/* cipher key */
	unsigned int enc_key_len;

	unsigned int authsize;			/* ICV length (AEAD only) */
	unsigned int auth_key_len;

	struct crypto_priv *cp;			/* owning device */
};
212
/* Per-request state for cipher and AEAD requests. */
struct qcrypto_cipher_req_ctx {
	u8 *iv;				/* IV for this request */
	unsigned int ivsize;
	int aead;			/* non-zero for AEAD requests */
	struct scatterlist asg;		/* Formatted associated data sg  */
	unsigned char *assoc;		/* Pointer to formatted assoc data */
	unsigned int assoclen;		/* Save Unformatted assoc data length */
	struct scatterlist *assoc_sg;	/* Save Unformatted assoc data sg */
	enum qce_cipher_alg_enum alg;	/* cipher algorithm (qce enum) */
	enum qce_cipher_dir_enum dir;	/* encrypt or decrypt */
	enum qce_cipher_mode_enum mode;	/* block mode, e.g. ECB/CBC/CCM */
};
225
/* Worst-case sizes across the supported SHA algorithms (SHA-256) */
#define SHA_MAX_BLOCK_SIZE      SHA256_BLOCK_SIZE
#define SHA_MAX_STATE_SIZE	(SHA256_DIGEST_SIZE / sizeof(u32))
#define SHA_MAX_DIGEST_SIZE	 SHA256_DIGEST_SIZE

/* standard initialization vector for SHA-1, source: FIPS 180-2 */
static uint8_t _std_init_vector_sha1_uint8[] =   {
	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
	0xC3, 0xD2, 0xE1, 0xF0
};

/* standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint8_t _std_init_vector_sha256_uint8[] = {
	0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
	0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
	0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
};
243
/*
 * Per-transform SHA/HMAC state.  The running digest and byte counters
 * are carried here between update calls (see _qce_ahash_complete()).
 */
struct qcrypto_sha_ctx {
	enum qce_hash_alg_enum alg;
	uint32_t byte_count[4];		/* running length counters from HW */
	uint8_t digest[SHA_MAX_DIGEST_SIZE];	/* intermediate digest */
	uint32_t diglen;
	uint8_t *tmp_tbuf;		/* scratch, block + digest bytes */
	uint8_t *trailing_buf;		/* pending partial block */
	uint8_t *in_buf;
	uint32_t authkey_in_len;
	uint32_t trailing_buf_len;
	uint8_t first_blk;		/* next submit is the first block */
	uint8_t last_blk;		/* next submit is the final block */
	uint8_t authkey[SHA_MAX_BLOCK_SIZE];	/* HMAC key, block sized */
	/* private request + completion used by the HMAC path */
	struct ahash_request *ahash_req;
	struct completion ahash_req_complete;
	struct scatterlist *sg;
	struct scatterlist tmp_sg;
	struct crypto_priv *cp;
};
263
/* Per-request hash state. */
struct qcrypto_sha_req_ctx {
	/* exportable partial state, one member per supported algorithm */
	union {
		struct sha1_state sha1_state_ctx;
		struct sha256_state sha256_state_ctx;
	};
	/* caller's original src/nbytes, restored on completion */
	struct scatterlist *src;
	uint32_t nbytes;
};
272
/*
 * Pack a byte stream into big-endian 32-bit words.  Any trailing 1-3
 * bytes are left-justified in one final word; words beyond the data
 * are not touched.
 *
 * The previous version computed (*b << 24) on a byte promoted to a
 * signed int, which is undefined behavior when the top bit is set;
 * all arithmetic is now done in uint32_t.
 */
static void _byte_stream_to_words(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned int whole = len / sizeof(uint32_t);
	unsigned int rem = len % sizeof(uint32_t);
	unsigned int i;

	while (whole--) {
		*iv = ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
			((uint32_t)b[2] << 8) | (uint32_t)b[3];
		iv++;
		b += sizeof(uint32_t);
	}

	if (rem) {
		uint32_t w = 0;

		for (i = 0; i < rem; i++)
			w |= (uint32_t)b[i] << (24 - 8 * i);
		*iv = w;
	}
}
300
/*
 * Unpack 32-bit words into a big-endian byte stream; a trailing 1-3
 * bytes are taken from the most significant end of the final word.
 */
static void _words_to_byte_stream(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned int whole = len / sizeof(uint32_t);
	unsigned int rem = len % sizeof(uint32_t);
	unsigned int i;

	for (i = 0; i < whole; i++, iv++) {
		*b++ = (unsigned char)(*iv >> 24);
		*b++ = (unsigned char)(*iv >> 16);
		*b++ = (unsigned char)(*iv >> 8);
		*b++ = (unsigned char)(*iv);
	}

	for (i = 0; i < rem; i++)
		b[i] = (unsigned char)(*iv >> (24 - 8 * i));
}
325
326static void _start_qcrypto_process(struct crypto_priv *cp);
327
328static struct qcrypto_alg *_qcrypto_sha_alg_alloc(struct crypto_priv *cp,
329 struct ahash_alg *template)
330{
331 struct qcrypto_alg *q_alg;
332 q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
333 if (!q_alg) {
334 pr_err("qcrypto Memory allocation of q_alg FAIL, error %ld\n",
335 PTR_ERR(q_alg));
336 return ERR_PTR(-ENOMEM);
337 }
338
339 q_alg->alg_type = QCRYPTO_ALG_SHA;
340 q_alg->sha_alg = *template;
341 q_alg->cp = cp;
342
343 return q_alg;
344};
345
346static struct qcrypto_alg *_qcrypto_cipher_alg_alloc(struct crypto_priv *cp,
347 struct crypto_alg *template)
348{
349 struct qcrypto_alg *q_alg;
350
351 q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
352 if (!q_alg) {
353 pr_err("qcrypto Memory allocation of q_alg FAIL, error %ld\n",
354 PTR_ERR(q_alg));
355 return ERR_PTR(-ENOMEM);
356 }
357
358 q_alg->alg_type = QCRYPTO_ALG_CIPHER;
359 q_alg->cipher_alg = *template;
360 q_alg->cp = cp;
361
362 return q_alg;
363};
364
365static int _qcrypto_cipher_cra_init(struct crypto_tfm *tfm)
366{
367 struct crypto_alg *alg = tfm->__crt_alg;
368 struct qcrypto_alg *q_alg;
369 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
370
371 q_alg = container_of(alg, struct qcrypto_alg, cipher_alg);
372
373 /* update context with ptr to cp */
374 ctx->cp = q_alg->cp;
375
376 /* random first IV */
377 get_random_bytes(ctx->iv, QCRYPTO_MAX_IV_LENGTH);
378
379 return 0;
380};
381
382static int _qcrypto_ahash_cra_init(struct crypto_tfm *tfm)
383{
384 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
385 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
386 struct ahash_alg *alg = container_of(crypto_hash_alg_common(ahash),
387 struct ahash_alg, halg);
388 struct qcrypto_alg *q_alg = container_of(alg, struct qcrypto_alg,
389 sha_alg);
390
391 crypto_ahash_set_reqsize(ahash, sizeof(struct qcrypto_sha_req_ctx));
392 /* update context with ptr to cp */
393 sha_ctx->cp = q_alg->cp;
394 sha_ctx->sg = NULL;
395 sha_ctx->tmp_tbuf = kzalloc(SHA_MAX_BLOCK_SIZE +
396 SHA_MAX_DIGEST_SIZE, GFP_KERNEL);
397 if (sha_ctx->tmp_tbuf == NULL) {
398 pr_err("qcrypto Can't Allocate mem: sha_ctx->tmp_tbuf, error %ld\n",
399 PTR_ERR(sha_ctx->tmp_tbuf));
400 return -ENOMEM;
401 }
402
403 sha_ctx->trailing_buf = kzalloc(SHA_MAX_BLOCK_SIZE, GFP_KERNEL);
404 if (sha_ctx->trailing_buf == NULL) {
405 kfree(sha_ctx->tmp_tbuf);
406 sha_ctx->tmp_tbuf = NULL;
407 pr_err("qcrypto Can't Allocate mem: sha_ctx->trailing_buf, error %ld\n",
408 PTR_ERR(sha_ctx->trailing_buf));
409 return -ENOMEM;
410 }
411
412 sha_ctx->ahash_req = NULL;
413 return 0;
414};
415
416static void _qcrypto_ahash_cra_exit(struct crypto_tfm *tfm)
417{
418 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
419
420 kfree(sha_ctx->tmp_tbuf);
421 sha_ctx->tmp_tbuf = NULL;
422 kfree(sha_ctx->trailing_buf);
423 sha_ctx->trailing_buf = NULL;
424 if (sha_ctx->sg != NULL) {
425 kfree(sha_ctx->sg);
426 sha_ctx->sg = NULL;
427 }
428 if (sha_ctx->ahash_req != NULL) {
429 ahash_request_free(sha_ctx->ahash_req);
430 sha_ctx->ahash_req = NULL;
431 }
432};
433
434
435static void _crypto_sha_hmac_ahash_req_complete(
436 struct crypto_async_request *req, int err);
437
438static int _qcrypto_ahash_hmac_cra_init(struct crypto_tfm *tfm)
439{
440 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
441 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
442 int ret = 0;
443
444 ret = _qcrypto_ahash_cra_init(tfm);
445 if (ret)
446 return ret;
447 sha_ctx->ahash_req = ahash_request_alloc(ahash, GFP_KERNEL);
448
449 if (sha_ctx->ahash_req == NULL) {
450 _qcrypto_ahash_cra_exit(tfm);
451 return -ENOMEM;
452 }
453
454 init_completion(&sha_ctx->ahash_req_complete);
455 ahash_request_set_callback(sha_ctx->ahash_req,
456 CRYPTO_TFM_REQ_MAY_BACKLOG,
457 _crypto_sha_hmac_ahash_req_complete,
458 &sha_ctx->ahash_req_complete);
459 crypto_ahash_clear_flags(ahash, ~0);
460
461 return 0;
462};
463
/* cra_init for ablkcipher: reserve per-request context, then common init. */
static int _qcrypto_cra_ablkcipher_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct qcrypto_cipher_req_ctx);
	return _qcrypto_cipher_cra_init(tfm);
};
469
/* cra_init for AEAD: reserve per-request context, then common init. */
static int _qcrypto_cra_aead_init(struct crypto_tfm *tfm)
{
	tfm->crt_aead.reqsize = sizeof(struct qcrypto_cipher_req_ctx);
	return _qcrypto_cipher_cra_init(tfm);
};
475
476static int _disp_stats(int id)
477{
478 struct crypto_stat *pstat;
479 int len = 0;
480
481 pstat = &_qcrypto_stat[id];
482 len = snprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
483 "\nQualcomm crypto accelerator %d Statistics:\n",
484 id + 1);
485
486 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
487 " ABLK AES CIPHER encryption : %d\n",
488 pstat->ablk_cipher_aes_enc);
489 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
490 " ABLK AES CIPHER decryption : %d\n",
491 pstat->ablk_cipher_aes_dec);
492
493 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
494 " ABLK DES CIPHER encryption : %d\n",
495 pstat->ablk_cipher_des_enc);
496 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
497 " ABLK DES CIPHER decryption : %d\n",
498 pstat->ablk_cipher_des_dec);
499
500 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
501 " ABLK 3DES CIPHER encryption : %d\n",
502 pstat->ablk_cipher_3des_enc);
503
504 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
505 " ABLK 3DES CIPHER decryption : %d\n",
506 pstat->ablk_cipher_3des_dec);
507
508 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
509 " ABLK CIPHER operation success: %d\n",
510 pstat->ablk_cipher_op_success);
511 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
512 " ABLK CIPHER operation fail : %d\n",
513 pstat->ablk_cipher_op_fail);
514
515 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
516 " AEAD SHA1-AES encryption : %d\n",
517 pstat->aead_sha1_aes_enc);
518 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
519 " AEAD SHA1-AES decryption : %d\n",
520 pstat->aead_sha1_aes_dec);
521
522 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
523 " AEAD SHA1-DES encryption : %d\n",
524 pstat->aead_sha1_des_enc);
525 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
526 " AEAD SHA1-DES decryption : %d\n",
527 pstat->aead_sha1_des_dec);
528
529 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
530 " AEAD SHA1-3DES encryption : %d\n",
531 pstat->aead_sha1_3des_enc);
532 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
533 " AEAD SHA1-3DES decryption : %d\n",
534 pstat->aead_sha1_3des_dec);
535
536 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
537 " AEAD operation success : %d\n",
538 pstat->aead_op_success);
539 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
540 " AEAD operation fail : %d\n",
541 pstat->aead_op_fail);
542 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
543 " SHA1 digest : %d\n",
544 pstat->sha1_digest);
545 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
546 " SHA256 digest : %d\n",
547 pstat->sha256_digest);
548 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
549 " SHA operation fail : %d\n",
550 pstat->sha_op_fail);
551 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
552 " SHA operation success : %d\n",
553 pstat->sha_op_success);
554 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
555 " SHA1 HMAC digest : %d\n",
556 pstat->sha1_hmac_digest);
557 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
558 " SHA256 HMAC digest : %d\n",
559 pstat->sha256_hmac_digest);
560 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
561 " SHA HMAC operation fail : %d\n",
562 pstat->sha_hmac_op_fail);
563 len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
564 " SHA HMAC operation success : %d\n",
565 pstat->sha_hmac_op_success);
566 return len;
567}
568
/*
 * Platform-device teardown: unregister and free every algorithm that
 * was registered for this device, close the QCE handle, and kill the
 * completion tasklet.
 */
static int _qcrypto_remove(struct platform_device *pdev)
{
	struct crypto_priv *cp;
	struct qcrypto_alg *q_alg;
	struct qcrypto_alg *n;

	cp = platform_get_drvdata(pdev);

	if (!cp)
		return 0;

	list_for_each_entry_safe(q_alg, n, &cp->alg_list, entry) {
		if (q_alg->alg_type == QCRYPTO_ALG_CIPHER)
			crypto_unregister_alg(&q_alg->cipher_alg);
		if (q_alg->alg_type == QCRYPTO_ALG_SHA)
			crypto_unregister_ahash(&q_alg->sha_alg);
		list_del(&q_alg->entry);
		kfree(q_alg);
	}

	if (cp->qce)
		qce_close(cp->qce);
	tasklet_kill(&cp->done_tasklet);
	kfree(cp);
	return 0;
};
595
/*
 * Set the AES key.  128/256-bit keys are always accepted; 192-bit keys
 * only when the engine advertises support for them.
 */
static int _qcrypto_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
		unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_priv *cp = ctx->cp;

	switch (len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_256:
		break;
	case AES_KEYSIZE_192:
		if (cp->ce_support.aes_key_192)
			break;
		/* fall through - 192-bit keys rejected on this engine */
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	};
	ctx->enc_key_len = len;
	memcpy(ctx->enc_key, key, len);
	return 0;
};
618
619static int _qcrypto_setkey_des(struct crypto_ablkcipher *cipher, const u8 *key,
620 unsigned int len)
621{
622 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
623 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
624 u32 tmp[DES_EXPKEY_WORDS];
625 int ret = des_ekey(tmp, key);
626
627 if (len != DES_KEY_SIZE) {
628 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
629 return -EINVAL;
630 };
631
632 if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
633 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
634 return -EINVAL;
635 }
636
637 ctx->enc_key_len = len;
638 memcpy(ctx->enc_key, key, len);
639 return 0;
640};
641
642static int _qcrypto_setkey_3des(struct crypto_ablkcipher *cipher, const u8 *key,
643 unsigned int len)
644{
645 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
646 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
647
648 if (len != DES3_EDE_KEY_SIZE) {
649 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
650 return -EINVAL;
651 };
652 ctx->enc_key_len = len;
653 memcpy(ctx->enc_key, key, len);
654 return 0;
655};
656
/*
 * Done tasklet: complete the finished request outside the QCE callback
 * context, then try to dispatch the next queued request.
 */
static void req_done(unsigned long data)
{
	struct crypto_async_request *areq;
	struct crypto_priv *cp = (struct crypto_priv *)data;
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	areq = cp->req;
	cp->req = NULL;
	spin_unlock_irqrestore(&cp->lock, flags);

	/* cp->res was set by the engine completion callback */
	if (areq)
		areq->complete(areq, cp->res);
	_start_qcrypto_process(cp);
};
672
/*
 * Refresh the exportable sha1_state in the request context after the
 * engine finishes: zero it on the final block, otherwise stash the
 * pending partial block and the intermediate digest (as words).
 */
static void _update_sha1_ctx(struct ahash_request *req)
{
	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx;
	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);

	if (sha_ctx->last_blk == 1)
		memset(sha_state_ctx, 0x00, sizeof(struct sha1_state));
	else {
		memset(sha_state_ctx->buffer, 0x00, SHA1_BLOCK_SIZE);
		memcpy(sha_state_ctx->buffer, sha_ctx->trailing_buf,
						sha_ctx->trailing_buf_len);
		_byte_stream_to_words(sha_state_ctx->state , sha_ctx->digest,
					SHA1_DIGEST_SIZE);
	}
	return;
}
690
/*
 * SHA-256 counterpart of _update_sha1_ctx(): refresh the exportable
 * sha256_state after the engine completes.
 */
static void _update_sha256_ctx(struct ahash_request *req)
{
	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx;
	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);

	if (sha_ctx->last_blk == 1)
		memset(sha_state_ctx, 0x00, sizeof(struct sha256_state));
	else {
		memset(sha_state_ctx->buf, 0x00, SHA256_BLOCK_SIZE);
		memcpy(sha_state_ctx->buf, sha_ctx->trailing_buf,
						sha_ctx->trailing_buf_len);
		_byte_stream_to_words(sha_state_ctx->state, sha_ctx->digest,
					SHA256_DIGEST_SIZE);
	}
	return;
}
708
/*
 * QCE completion callback for hash requests.  Saves the produced
 * digest and running byte counters back into the transform context,
 * restores the caller's src/nbytes, refreshes the exportable state,
 * updates the statistics, and defers the final request completion to
 * the done tasklet.
 */
static void _qce_ahash_complete(void *cookie, unsigned char *digest,
		unsigned char *authdata, int ret)
{
	struct ahash_request *areq = (struct ahash_request *) cookie;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(areq->base.tfm);
	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(areq);
	struct crypto_priv *cp = sha_ctx->cp;
	struct crypto_stat *pstat;
	uint32_t diglen = crypto_ahash_digestsize(ahash);
	uint32_t *auth32 = (uint32_t *)authdata;

	pstat = &_qcrypto_stat[cp->pdev->id];

#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qce_ahash_complete: %p ret %d\n",
				areq, ret);
#endif

	if (digest) {
		memcpy(sha_ctx->digest, digest, diglen);
		memcpy(areq->result, digest, diglen);
	}
	if (authdata) {
		/* running message-length counters produced by the engine */
		sha_ctx->byte_count[0] = auth32[0];
		sha_ctx->byte_count[1] = auth32[1];
		sha_ctx->byte_count[2] = auth32[2];
		sha_ctx->byte_count[3] = auth32[3];
	}
	/* restore the caller's original scatterlist and length */
	areq->src = rctx->src;
	areq->nbytes = rctx->nbytes;

	if (sha_ctx->sg != NULL) {
		kfree(sha_ctx->sg);
		sha_ctx->sg = NULL;
	}

	if (sha_ctx->alg == QCE_HASH_SHA1)
		_update_sha1_ctx(areq);
	if (sha_ctx->alg == QCE_HASH_SHA256)
		_update_sha256_ctx(areq);

	sha_ctx->last_blk = 0;
	sha_ctx->first_blk = 0;

	if (ret) {
		cp->res = -ENXIO;
		pstat->sha_op_fail++;
	} else {
		cp->res = 0;
		pstat->sha_op_success++;
	}

	/* shared engine: give it back to TZ before completing the request */
	if (cp->platform_support.ce_shared)
		schedule_work(&cp->unlock_ce_ws);
	tasklet_schedule(&cp->done_tasklet);
};
766
/*
 * QCE completion callback for ablkcipher requests.  Stores the chained
 * IV back into the transform context, records the result, and defers
 * the final completion to the done tasklet.
 */
static void _qce_ablk_cipher_complete(void *cookie, unsigned char *icb,
		unsigned char *iv, int ret)
{
	struct ablkcipher_request *areq = (struct ablkcipher_request *) cookie;
	struct crypto_ablkcipher *ablk = crypto_ablkcipher_reqtfm(areq);
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qce_ablk_cipher_complete: %p ret %d\n",
				 areq, ret);
#endif
	if (iv)
		memcpy(ctx->iv, iv, crypto_ablkcipher_ivsize(ablk));

	if (ret) {
		cp->res = -ENXIO;
		pstat->ablk_cipher_op_fail++;
	} else {
		cp->res = 0;
		pstat->ablk_cipher_op_success++;
	}
	if (cp->platform_support.ce_shared)
		schedule_work(&cp->unlock_ce_ws);
	tasklet_schedule(&cp->done_tasklet);
};
796
797
/*
 * QCE completion callback for AEAD requests.
 *
 * CCM: restore the caller's unformatted associated data and free the
 * formatted copy; the engine verifies the ICV itself.
 * Non-CCM: on encrypt, append the computed ICV to dst; on decrypt,
 * compare the computed ICV against the one carried in src.
 * Completion is then deferred to the done tasklet.
 */
static void _qce_aead_complete(void *cookie, unsigned char *icv,
				unsigned char *iv, int ret)
{
	struct aead_request *areq = (struct aead_request *) cookie;
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct qcrypto_cipher_req_ctx *rctx;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(areq);

	if (rctx->mode == QCE_MODE_CCM) {
		kzfree(rctx->assoc);
		areq->assoc = rctx->assoc_sg;
		areq->assoclen = rctx->assoclen;
		if (ret) {
			/* 0x2000000 is presumably the engine's
			 * MAC-verification-failed status -- confirm
			 * against the qce documentation */
			if (ret == 0x2000000)
				ret = -EBADMSG;
			else
				ret = -ENXIO;
		}
	} else {
		if (ret == 0) {
			if (rctx->dir == QCE_ENCRYPT) {
				/* copy the icv to dst */
				scatterwalk_map_and_copy(icv, areq->dst,
						areq->cryptlen,
						ctx->authsize, 1);

			} else {
				unsigned char tmp[SHA256_DIGESTSIZE];

				/* compare icv from src */
				scatterwalk_map_and_copy(tmp,
					areq->src, areq->cryptlen -
					ctx->authsize, ctx->authsize, 0);
				/* NOTE(review): memcmp() is not constant
				 * time; crypto_memneq() would avoid a
				 * potential ICV timing side channel */
				ret = memcmp(icv, tmp, ctx->authsize);
				if (ret != 0)
					ret = -EBADMSG;

			}
		} else {
			ret = -ENXIO;
		}

		if (iv)
			memcpy(ctx->iv, iv, crypto_aead_ivsize(aead));
	}

	if (ret)
		pstat->aead_op_fail++;
	else
		pstat->aead_op_success++;

	if (cp->platform_support.ce_shared)
		schedule_work(&cp->unlock_ce_ws);
	tasklet_schedule(&cp->done_tasklet);
}
859
860static int aead_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
861{
862 __be32 data;
863
864 memset(block, 0, csize);
865 block += csize;
866
867 if (csize >= 4)
868 csize = 4;
869 else if (msglen > (1 << (8 * csize)))
870 return -EOVERFLOW;
871
872 data = cpu_to_be32(msglen);
873 memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
874
875 return 0;
876}
877
/*
 * Build the CCM B0 flags/nonce block from the request IV.  iv[0]
 * carries L'-1, so 'i' is the width in bytes of the length field.
 * Returns -EINVAL when that width exceeds MAX_NONCE, otherwise the
 * result of encoding the message length.
 */
static int qccrypto_set_aead_ccm_nonce(struct qce_req *qreq)
{
	struct aead_request *areq = (struct aead_request *) qreq->areq;
	unsigned int i = ((unsigned int)qreq->iv[0]) + 1;

	memcpy(&qreq->nonce[0] , qreq->iv, qreq->ivsize);
	/*
	 * Format control info per RFC 3610 and
	 * NIST Special Publication 800-38C
	 */
	qreq->nonce[0] |= (8 * ((qreq->authsize - 2) / 2));
	if (areq->assoclen)
		qreq->nonce[0] |= 64;

	if (i > MAX_NONCE)
		return -EINVAL;

	return aead_ccm_set_msg_len(qreq->nonce + 16 - i, qreq->cryptlen, i);
}
897
898static int qcrypto_aead_ccm_format_adata(struct qce_req *qreq, uint32_t alen,
899 struct scatterlist *sg)
900{
901 unsigned char *adata;
902 uint32_t len, l;
903
904 qreq->assoc = kzalloc((alen + 0x64), (GFP_KERNEL | __GFP_DMA));
905 if (!qreq->assoc) {
906 pr_err("qcrypto Memory allocation of adata FAIL, error %ld\n",
907 PTR_ERR(qreq->assoc));
908 return -ENOMEM;
909 }
910 adata = qreq->assoc;
911 /*
912 * Add control info for associated data
913 * RFC 3610 and NIST Special Publication 800-38C
914 */
915 if (alen < 65280) {
916 *(__be16 *)adata = cpu_to_be16(alen);
917 len = 2;
918 } else {
919 if ((alen >= 65280) && (alen <= 0xffffffff)) {
920 *(__be16 *)adata = cpu_to_be16(0xfffe);
921 *(__be32 *)&adata[2] = cpu_to_be32(alen);
922 len = 6;
923 } else {
924 *(__be16 *)adata = cpu_to_be16(0xffff);
925 *(__be32 *)&adata[6] = cpu_to_be32(alen);
926 len = 10;
927 }
928 }
929 adata += len;
930 qreq->assoclen = ALIGN((alen + len), 16);
931 for (l = alen; l > 0; sg = sg_next(sg)) {
932 memcpy(adata, sg_virt(sg), sg->length);
933 l -= sg->length;
934 adata += sg->length;
935 }
936 return 0;
937}
938
/*
 * Dispatch loop: pull the next request off cp->queue (only one request
 * is active at a time, tracked in cp->req), translate it into a qce
 * request by algorithm type, and hand it to the engine.  If submission
 * fails, the request is completed with the error and the loop retries
 * with the next queued request.
 */
static void _start_qcrypto_process(struct crypto_priv *cp)
{
	struct crypto_async_request *async_req = NULL;
	struct crypto_async_request *backlog = NULL;
	unsigned long flags;
	u32 type;
	struct qce_req qreq;
	int ret;
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *cipher_ctx;
	struct qcrypto_sha_ctx *sha_ctx;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

again:
	spin_lock_irqsave(&cp->lock, flags);
	if (cp->req == NULL) {
		backlog = crypto_get_backlog(&cp->queue);
		async_req = crypto_dequeue_request(&cp->queue);
		cp->req = async_req;
	}
	spin_unlock_irqrestore(&cp->lock, flags);
	if (!async_req)
		return;
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	type = crypto_tfm_alg_type(async_req->tfm);

	if (type == CRYPTO_ALG_TYPE_ABLKCIPHER) {
		struct ablkcipher_request *req;
		struct crypto_ablkcipher *tfm;

		req = container_of(async_req, struct ablkcipher_request, base);
		cipher_ctx = crypto_tfm_ctx(async_req->tfm);
		rctx = ablkcipher_request_ctx(req);
		tfm = crypto_ablkcipher_reqtfm(req);

		/* parameters were recorded in rctx by the entry points */
		qreq.op = QCE_REQ_ABLK_CIPHER;
		qreq.qce_cb = _qce_ablk_cipher_complete;
		qreq.areq = req;
		qreq.alg = rctx->alg;
		qreq.dir = rctx->dir;
		qreq.mode = rctx->mode;
		qreq.enckey = cipher_ctx->enc_key;
		qreq.encklen = cipher_ctx->enc_key_len;
		qreq.iv = req->info;
		qreq.ivsize = crypto_ablkcipher_ivsize(tfm);
		qreq.cryptlen = req->nbytes;
		qreq.use_pmem = 0;

		/* a zero-length key is only legal with hardware-held keys */
		if ((cipher_ctx->enc_key_len == 0) &&
				(cp->platform_support.hw_key_support == 0))
			ret = -EINVAL;
		else
			ret =  qce_ablk_cipher_req(cp->qce, &qreq);
	} else {
		if (type == CRYPTO_ALG_TYPE_AHASH) {

			struct ahash_request *req;
			struct qce_sha_req sreq;

			req = container_of(async_req,
						struct ahash_request, base);
			sha_ctx = crypto_tfm_ctx(async_req->tfm);

			sreq.qce_cb = _qce_ahash_complete;
			sreq.digest =  &sha_ctx->digest[0];
			sreq.src = req->src;
			sreq.auth_data[0] = sha_ctx->byte_count[0];
			sreq.auth_data[1] = sha_ctx->byte_count[1];
			sreq.auth_data[2] = sha_ctx->byte_count[2];
			sreq.auth_data[3] = sha_ctx->byte_count[3];
			sreq.first_blk = sha_ctx->first_blk;
			sreq.last_blk = sha_ctx->last_blk;
			sreq.size = req->nbytes;
			sreq.areq = req;

			/* HMAC variants carry the key; plain hashes do not */
			switch (sha_ctx->alg) {
			case QCE_HASH_SHA1:
				sreq.alg = QCE_HASH_SHA1;
				sreq.authkey = NULL;
				break;
			case QCE_HASH_SHA256:
				sreq.alg = QCE_HASH_SHA256;
				sreq.authkey = NULL;
				break;
			case QCE_HASH_SHA1_HMAC:
				sreq.alg = QCE_HASH_SHA1_HMAC;
				sreq.authkey = &sha_ctx->authkey[0];
				break;
			case QCE_HASH_SHA256_HMAC:
				sreq.alg = QCE_HASH_SHA256_HMAC;
				sreq.authkey = &sha_ctx->authkey[0];
				break;
			default:
				break;
			};
			ret =  qce_process_sha_req(cp->qce, &sreq);

		} else {
			struct aead_request *req = container_of(async_req,
						struct aead_request, base);
			struct crypto_aead *aead = crypto_aead_reqtfm(req);

			rctx = aead_request_ctx(req);
			cipher_ctx = crypto_tfm_ctx(async_req->tfm);

			qreq.op = QCE_REQ_AEAD;
			qreq.qce_cb = _qce_aead_complete;

			qreq.areq = req;
			qreq.alg = rctx->alg;
			qreq.dir = rctx->dir;
			qreq.mode = rctx->mode;
			qreq.iv = rctx->iv;

			qreq.enckey = cipher_ctx->enc_key;
			qreq.encklen = cipher_ctx->enc_key_len;
			qreq.authkey = cipher_ctx->auth_key;
			qreq.authklen = cipher_ctx->auth_key_len;
			qreq.authsize = crypto_aead_authsize(aead);
			qreq.ivsize =  crypto_aead_ivsize(aead);
			if (qreq.mode == QCE_MODE_CCM) {
				/* on decrypt the trailing ICV is not data */
				if (qreq.dir == QCE_ENCRYPT)
					qreq.cryptlen = req->cryptlen;
				else
					qreq.cryptlen = req->cryptlen -
								qreq.authsize;
				/* Get NONCE */
				ret = qccrypto_set_aead_ccm_nonce(&qreq);
				if (ret)
					goto done;
				/* Format Associated data */
				ret = qcrypto_aead_ccm_format_adata(&qreq,
								req->assoclen,
								req->assoc);
				if (ret)
					goto done;
				/*
				 * Save the original associated data
				 * length and sg
				 */
				rctx->assoc_sg  = req->assoc;
				rctx->assoclen  = req->assoclen;
				rctx->assoc  = qreq.assoc;
				/*
				 * update req with new formatted associated
				 * data info
				 */
				req->assoc = &rctx->asg;
				req->assoclen = qreq.assoclen;
				sg_set_buf(req->assoc, qreq.assoc,
							req->assoclen);
				sg_mark_end(req->assoc);
			}
			/* NOTE(review): if qce_aead_req() fails after the
			 * CCM assoc buffer was allocated above, the error
			 * path below completes the request without freeing
			 * rctx->assoc -- looks like a leak; confirm. */
			ret =  qce_aead_req(cp->qce, &qreq);
		}
	};
done:
	if (ret) {

		spin_lock_irqsave(&cp->lock, flags);
		cp->req = NULL;
		spin_unlock_irqrestore(&cp->lock, flags);

		if (type == CRYPTO_ALG_TYPE_ABLKCIPHER)
			pstat->ablk_cipher_op_fail++;
		else
			if (type == CRYPTO_ALG_TYPE_AHASH)
				pstat->sha_op_fail++;
			else
				pstat->aead_op_fail++;

		async_req->complete(async_req, ret);
		goto again;
	};
};
1117
1118static int _qcrypto_queue_req(struct crypto_priv *cp,
1119 struct crypto_async_request *req)
1120{
1121 int ret;
1122 unsigned long flags;
1123
1124 if (cp->platform_support.ce_shared) {
1125 ret = qcrypto_lock_ce(cp);
1126 if (ret)
1127 return ret;
1128 }
1129
1130 spin_lock_irqsave(&cp->lock, flags);
1131 ret = crypto_enqueue_request(&cp->queue, req);
1132 spin_unlock_irqrestore(&cp->lock, flags);
1133 _start_qcrypto_process(cp);
1134
1135 return ret;
1136}
1137
1138static int _qcrypto_enc_aes_ecb(struct ablkcipher_request *req)
1139{
1140 struct qcrypto_cipher_req_ctx *rctx;
1141 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1142 struct crypto_priv *cp = ctx->cp;
1143 struct crypto_stat *pstat;
1144
1145 pstat = &_qcrypto_stat[cp->pdev->id];
1146
1147 BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
1148 CRYPTO_ALG_TYPE_ABLKCIPHER);
1149#ifdef QCRYPTO_DEBUG
1150 dev_info(&cp->pdev->dev, "_qcrypto_enc_aes_ecb: %p\n", req);
1151#endif
1152 rctx = ablkcipher_request_ctx(req);
1153 rctx->aead = 0;
1154 rctx->alg = CIPHER_ALG_AES;
1155 rctx->dir = QCE_ENCRYPT;
1156 rctx->mode = QCE_MODE_ECB;
1157
1158 pstat->ablk_cipher_aes_enc++;
1159 return _qcrypto_queue_req(cp, &req->base);
1160};
1161
1162static int _qcrypto_enc_aes_cbc(struct ablkcipher_request *req)
1163{
1164 struct qcrypto_cipher_req_ctx *rctx;
1165 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1166 struct crypto_priv *cp = ctx->cp;
1167 struct crypto_stat *pstat;
1168
1169 pstat = &_qcrypto_stat[cp->pdev->id];
1170
1171 BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
1172 CRYPTO_ALG_TYPE_ABLKCIPHER);
1173#ifdef QCRYPTO_DEBUG
1174 dev_info(&cp->pdev->dev, "_qcrypto_enc_aes_cbc: %p\n", req);
1175#endif
1176 rctx = ablkcipher_request_ctx(req);
1177 rctx->aead = 0;
1178 rctx->alg = CIPHER_ALG_AES;
1179 rctx->dir = QCE_ENCRYPT;
1180 rctx->mode = QCE_MODE_CBC;
1181
1182 pstat->ablk_cipher_aes_enc++;
1183 return _qcrypto_queue_req(cp, &req->base);
1184};
1185
1186static int _qcrypto_enc_aes_ctr(struct ablkcipher_request *req)
1187{
1188 struct qcrypto_cipher_req_ctx *rctx;
1189 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1190 struct crypto_priv *cp = ctx->cp;
1191 struct crypto_stat *pstat;
1192
1193 pstat = &_qcrypto_stat[cp->pdev->id];
1194
1195 BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
1196 CRYPTO_ALG_TYPE_ABLKCIPHER);
1197#ifdef QCRYPTO_DEBUG
1198 dev_info(&cp->pdev->dev, "_qcrypto_enc_aes_ctr: %p\n", req);
1199#endif
1200 rctx = ablkcipher_request_ctx(req);
1201 rctx->aead = 0;
1202 rctx->alg = CIPHER_ALG_AES;
1203 rctx->dir = QCE_ENCRYPT;
1204 rctx->mode = QCE_MODE_CTR;
1205
1206 pstat->ablk_cipher_aes_enc++;
1207 return _qcrypto_queue_req(cp, &req->base);
1208};
1209
1210static int _qcrypto_enc_aes_xts(struct ablkcipher_request *req)
1211{
1212 struct qcrypto_cipher_req_ctx *rctx;
1213 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1214 struct crypto_priv *cp = ctx->cp;
1215 struct crypto_stat *pstat;
1216
1217 pstat = &_qcrypto_stat[cp->pdev->id];
1218
1219 BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
1220 CRYPTO_ALG_TYPE_ABLKCIPHER);
1221 rctx = ablkcipher_request_ctx(req);
1222 rctx->aead = 0;
1223 rctx->alg = CIPHER_ALG_AES;
1224 rctx->dir = QCE_ENCRYPT;
1225 rctx->mode = QCE_MODE_XTS;
1226
1227 pstat->ablk_cipher_aes_enc++;
1228 return _qcrypto_queue_req(cp, &req->base);
1229};
1230
1231static int _qcrypto_aead_encrypt_aes_ccm(struct aead_request *req)
1232{
1233 struct qcrypto_cipher_req_ctx *rctx;
1234 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1235 struct crypto_priv *cp = ctx->cp;
1236 struct crypto_stat *pstat;
1237
1238 if ((ctx->authsize > 16) || (ctx->authsize < 4) || (ctx->authsize & 1))
1239 return -EINVAL;
1240 if ((ctx->auth_key_len != AES_KEYSIZE_128) &&
1241 (ctx->auth_key_len != AES_KEYSIZE_256))
1242 return -EINVAL;
1243
1244 pstat = &_qcrypto_stat[cp->pdev->id];
1245
1246 rctx = aead_request_ctx(req);
1247 rctx->aead = 1;
1248 rctx->alg = CIPHER_ALG_AES;
1249 rctx->dir = QCE_ENCRYPT;
1250 rctx->mode = QCE_MODE_CCM;
1251 rctx->iv = req->iv;
1252
1253 pstat->aead_sha1_aes_enc++;
1254 return _qcrypto_queue_req(cp, &req->base);
1255}
1256
1257static int _qcrypto_enc_des_ecb(struct ablkcipher_request *req)
1258{
1259 struct qcrypto_cipher_req_ctx *rctx;
1260 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1261 struct crypto_priv *cp = ctx->cp;
1262 struct crypto_stat *pstat;
1263
1264 pstat = &_qcrypto_stat[cp->pdev->id];
1265
1266 BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
1267 CRYPTO_ALG_TYPE_ABLKCIPHER);
1268 rctx = ablkcipher_request_ctx(req);
1269 rctx->aead = 0;
1270 rctx->alg = CIPHER_ALG_DES;
1271 rctx->dir = QCE_ENCRYPT;
1272 rctx->mode = QCE_MODE_ECB;
1273
1274 pstat->ablk_cipher_des_enc++;
1275 return _qcrypto_queue_req(cp, &req->base);
1276};
1277
1278static int _qcrypto_enc_des_cbc(struct ablkcipher_request *req)
1279{
1280 struct qcrypto_cipher_req_ctx *rctx;
1281 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1282 struct crypto_priv *cp = ctx->cp;
1283 struct crypto_stat *pstat;
1284
1285 pstat = &_qcrypto_stat[cp->pdev->id];
1286
1287 BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
1288 CRYPTO_ALG_TYPE_ABLKCIPHER);
1289 rctx = ablkcipher_request_ctx(req);
1290 rctx->aead = 0;
1291 rctx->alg = CIPHER_ALG_DES;
1292 rctx->dir = QCE_ENCRYPT;
1293 rctx->mode = QCE_MODE_CBC;
1294
1295 pstat->ablk_cipher_des_enc++;
1296 return _qcrypto_queue_req(cp, &req->base);
1297};
1298
1299static int _qcrypto_enc_3des_ecb(struct ablkcipher_request *req)
1300{
1301 struct qcrypto_cipher_req_ctx *rctx;
1302 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1303 struct crypto_priv *cp = ctx->cp;
1304 struct crypto_stat *pstat;
1305
1306 pstat = &_qcrypto_stat[cp->pdev->id];
1307
1308 BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
1309 CRYPTO_ALG_TYPE_ABLKCIPHER);
1310 rctx = ablkcipher_request_ctx(req);
1311 rctx->aead = 0;
1312 rctx->alg = CIPHER_ALG_3DES;
1313 rctx->dir = QCE_ENCRYPT;
1314 rctx->mode = QCE_MODE_ECB;
1315
1316 pstat->ablk_cipher_3des_enc++;
1317 return _qcrypto_queue_req(cp, &req->base);
1318};
1319
1320static int _qcrypto_enc_3des_cbc(struct ablkcipher_request *req)
1321{
1322 struct qcrypto_cipher_req_ctx *rctx;
1323 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1324 struct crypto_priv *cp = ctx->cp;
1325 struct crypto_stat *pstat;
1326
1327 pstat = &_qcrypto_stat[cp->pdev->id];
1328
1329 BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
1330 CRYPTO_ALG_TYPE_ABLKCIPHER);
1331 rctx = ablkcipher_request_ctx(req);
1332 rctx->aead = 0;
1333 rctx->alg = CIPHER_ALG_3DES;
1334 rctx->dir = QCE_ENCRYPT;
1335 rctx->mode = QCE_MODE_CBC;
1336
1337 pstat->ablk_cipher_3des_enc++;
1338 return _qcrypto_queue_req(cp, &req->base);
1339};
1340
1341static int _qcrypto_dec_aes_ecb(struct ablkcipher_request *req)
1342{
1343 struct qcrypto_cipher_req_ctx *rctx;
1344 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1345 struct crypto_priv *cp = ctx->cp;
1346 struct crypto_stat *pstat;
1347
1348 pstat = &_qcrypto_stat[cp->pdev->id];
1349
1350 BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
1351 CRYPTO_ALG_TYPE_ABLKCIPHER);
1352#ifdef QCRYPTO_DEBUG
1353 dev_info(&cp->pdev->dev, "_qcrypto_dec_aes_ecb: %p\n", req);
1354#endif
1355 rctx = ablkcipher_request_ctx(req);
1356 rctx->aead = 0;
1357 rctx->alg = CIPHER_ALG_AES;
1358 rctx->dir = QCE_DECRYPT;
1359 rctx->mode = QCE_MODE_ECB;
1360
1361 pstat->ablk_cipher_aes_dec++;
1362 return _qcrypto_queue_req(cp, &req->base);
1363};
1364
1365static int _qcrypto_dec_aes_cbc(struct ablkcipher_request *req)
1366{
1367 struct qcrypto_cipher_req_ctx *rctx;
1368 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1369 struct crypto_priv *cp = ctx->cp;
1370 struct crypto_stat *pstat;
1371
1372 pstat = &_qcrypto_stat[cp->pdev->id];
1373
1374 BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
1375 CRYPTO_ALG_TYPE_ABLKCIPHER);
1376#ifdef QCRYPTO_DEBUG
1377 dev_info(&cp->pdev->dev, "_qcrypto_dec_aes_cbc: %p\n", req);
1378#endif
1379
1380 rctx = ablkcipher_request_ctx(req);
1381 rctx->aead = 0;
1382 rctx->alg = CIPHER_ALG_AES;
1383 rctx->dir = QCE_DECRYPT;
1384 rctx->mode = QCE_MODE_CBC;
1385
1386 pstat->ablk_cipher_aes_dec++;
1387 return _qcrypto_queue_req(cp, &req->base);
1388};
1389
1390static int _qcrypto_dec_aes_ctr(struct ablkcipher_request *req)
1391{
1392 struct qcrypto_cipher_req_ctx *rctx;
1393 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1394 struct crypto_priv *cp = ctx->cp;
1395 struct crypto_stat *pstat;
1396
1397 pstat = &_qcrypto_stat[cp->pdev->id];
1398
1399 BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
1400 CRYPTO_ALG_TYPE_ABLKCIPHER);
1401#ifdef QCRYPTO_DEBUG
1402 dev_info(&cp->pdev->dev, "_qcrypto_dec_aes_ctr: %p\n", req);
1403#endif
1404 rctx = ablkcipher_request_ctx(req);
1405 rctx->aead = 0;
1406 rctx->alg = CIPHER_ALG_AES;
1407 rctx->mode = QCE_MODE_CTR;
1408
1409 /* Note. There is no such thing as aes/counter mode, decrypt */
1410 rctx->dir = QCE_ENCRYPT;
1411
1412 pstat->ablk_cipher_aes_dec++;
1413 return _qcrypto_queue_req(cp, &req->base);
1414};
1415
1416static int _qcrypto_dec_des_ecb(struct ablkcipher_request *req)
1417{
1418 struct qcrypto_cipher_req_ctx *rctx;
1419 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1420 struct crypto_priv *cp = ctx->cp;
1421 struct crypto_stat *pstat;
1422
1423 pstat = &_qcrypto_stat[cp->pdev->id];
1424
1425 BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
1426 CRYPTO_ALG_TYPE_ABLKCIPHER);
1427 rctx = ablkcipher_request_ctx(req);
1428 rctx->aead = 0;
1429 rctx->alg = CIPHER_ALG_DES;
1430 rctx->dir = QCE_DECRYPT;
1431 rctx->mode = QCE_MODE_ECB;
1432
1433 pstat->ablk_cipher_des_dec++;
1434 return _qcrypto_queue_req(cp, &req->base);
1435};
1436
1437static int _qcrypto_dec_des_cbc(struct ablkcipher_request *req)
1438{
1439 struct qcrypto_cipher_req_ctx *rctx;
1440 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1441 struct crypto_priv *cp = ctx->cp;
1442 struct crypto_stat *pstat;
1443
1444 pstat = &_qcrypto_stat[cp->pdev->id];
1445
1446 BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
1447 CRYPTO_ALG_TYPE_ABLKCIPHER);
1448 rctx = ablkcipher_request_ctx(req);
1449 rctx->aead = 0;
1450 rctx->alg = CIPHER_ALG_DES;
1451 rctx->dir = QCE_DECRYPT;
1452 rctx->mode = QCE_MODE_CBC;
1453
1454 pstat->ablk_cipher_des_dec++;
1455 return _qcrypto_queue_req(cp, &req->base);
1456};
1457
1458static int _qcrypto_dec_3des_ecb(struct ablkcipher_request *req)
1459{
1460 struct qcrypto_cipher_req_ctx *rctx;
1461 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1462 struct crypto_priv *cp = ctx->cp;
1463 struct crypto_stat *pstat;
1464
1465 pstat = &_qcrypto_stat[cp->pdev->id];
1466
1467 BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
1468 CRYPTO_ALG_TYPE_ABLKCIPHER);
1469 rctx = ablkcipher_request_ctx(req);
1470 rctx->aead = 0;
1471 rctx->alg = CIPHER_ALG_3DES;
1472 rctx->dir = QCE_DECRYPT;
1473 rctx->mode = QCE_MODE_ECB;
1474
1475 pstat->ablk_cipher_3des_dec++;
1476 return _qcrypto_queue_req(cp, &req->base);
1477};
1478
1479static int _qcrypto_dec_3des_cbc(struct ablkcipher_request *req)
1480{
1481 struct qcrypto_cipher_req_ctx *rctx;
1482 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1483 struct crypto_priv *cp = ctx->cp;
1484 struct crypto_stat *pstat;
1485
1486 pstat = &_qcrypto_stat[cp->pdev->id];
1487
1488 BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
1489 CRYPTO_ALG_TYPE_ABLKCIPHER);
1490 rctx = ablkcipher_request_ctx(req);
1491 rctx->aead = 0;
1492 rctx->alg = CIPHER_ALG_3DES;
1493 rctx->dir = QCE_DECRYPT;
1494 rctx->mode = QCE_MODE_CBC;
1495
1496 pstat->ablk_cipher_3des_dec++;
1497 return _qcrypto_queue_req(cp, &req->base);
1498};
1499
1500static int _qcrypto_dec_aes_xts(struct ablkcipher_request *req)
1501{
1502 struct qcrypto_cipher_req_ctx *rctx;
1503 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1504 struct crypto_priv *cp = ctx->cp;
1505 struct crypto_stat *pstat;
1506
1507 pstat = &_qcrypto_stat[cp->pdev->id];
1508
1509 BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
1510 CRYPTO_ALG_TYPE_ABLKCIPHER);
1511 rctx = ablkcipher_request_ctx(req);
1512 rctx->aead = 0;
1513 rctx->alg = CIPHER_ALG_AES;
1514 rctx->mode = QCE_MODE_XTS;
1515 rctx->dir = QCE_DECRYPT;
1516
1517 pstat->ablk_cipher_aes_dec++;
1518 return _qcrypto_queue_req(cp, &req->base);
1519};
1520
1521
1522static int _qcrypto_aead_decrypt_aes_ccm(struct aead_request *req)
1523{
1524 struct qcrypto_cipher_req_ctx *rctx;
1525 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1526 struct crypto_priv *cp = ctx->cp;
1527 struct crypto_stat *pstat;
1528
1529 if ((ctx->authsize > 16) || (ctx->authsize < 4) || (ctx->authsize & 1))
1530 return -EINVAL;
1531 if ((ctx->auth_key_len != AES_KEYSIZE_128) &&
1532 (ctx->auth_key_len != AES_KEYSIZE_256))
1533 return -EINVAL;
1534
1535 pstat = &_qcrypto_stat[cp->pdev->id];
1536
1537 rctx = aead_request_ctx(req);
1538 rctx->aead = 1;
1539 rctx->alg = CIPHER_ALG_AES;
1540 rctx->dir = QCE_DECRYPT;
1541 rctx->mode = QCE_MODE_CCM;
1542 rctx->iv = req->iv;
1543
1544 pstat->aead_sha1_aes_dec++;
1545 return _qcrypto_queue_req(cp, &req->base);
1546}
1547
/*
 * Record the requested ICV (tag) length for a non-CCM AEAD tfm.
 * No driver-side restriction: any size the crypto core allows is kept.
 */
static int _qcrypto_aead_setauthsize(struct crypto_aead *authenc,
				unsigned int authsize)
{
	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	return 0;
}
1556
1557static int _qcrypto_aead_ccm_setauthsize(struct crypto_aead *authenc,
1558 unsigned int authsize)
1559{
1560 struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);
1561
1562 switch (authsize) {
1563 case 4:
1564 case 6:
1565 case 8:
1566 case 10:
1567 case 12:
1568 case 14:
1569 case 16:
1570 break;
1571 default:
1572 return -EINVAL;
1573 }
1574 ctx->authsize = authsize;
1575 return 0;
1576}
1577
/*
 * _qcrypto_aead_setkey - parse an authenc() composite key blob
 *
 * The key arrives rtattr-framed: a crypto_authenc_key_param header
 * carrying the encryption-key length, followed by the authentication
 * key and then the encryption key.  Both keys are copied into the tfm
 * context.  On any framing or length error the tfm is flagged with
 * CRYPTO_TFM_RES_BAD_KEY_LEN and -EINVAL is returned.
 */
static int _qcrypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			unsigned int keylen)
{
	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
	struct rtattr *rta = (struct rtattr *)key;
	struct crypto_authenc_key_param *param;

	if (!RTA_OK(rta, keylen))
		goto badkey;
	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;
	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	ctx->enc_key_len = be32_to_cpu(param->enckeylen);

	/* step past the rtattr header to the concatenated keys */
	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < ctx->enc_key_len)
		goto badkey;

	/* auth key occupies whatever precedes the encryption key */
	ctx->auth_key_len = keylen - ctx->enc_key_len;
	if (ctx->enc_key_len >= QCRYPTO_MAX_KEY_SIZE ||
		ctx->auth_key_len >= QCRYPTO_MAX_KEY_SIZE)
		goto badkey;
	/* auth key is zero-padded out to the full buffer size */
	memset(ctx->auth_key, 0, QCRYPTO_MAX_KEY_SIZE);
	memcpy(ctx->enc_key, key + ctx->auth_key_len, ctx->enc_key_len);
	memcpy(ctx->auth_key, key, ctx->auth_key_len);

	return 0;
badkey:
	ctx->enc_key_len = 0;
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
1615
/*
 * Set the AES key for CCM.  CCM derives both the CTR encryption and
 * the CBC-MAC authentication from the same key, so the key is stored
 * in both the enc_key and auth_key slots of the tfm context.
 */
static int _qcrypto_aead_ccm_setkey(struct crypto_aead *aead, const u8 *key,
			unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_priv *cp = ctx->cp;

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_256:
		break;
	case AES_KEYSIZE_192:
		if (cp->ce_support.aes_key_192)
			break;
		/* fallthrough: 192-bit keys unsupported on this engine */
	default:
		ctx->enc_key_len = 0;
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	};
	ctx->enc_key_len = keylen;
	memcpy(ctx->enc_key, key, keylen);
	ctx->auth_key_len = keylen;
	memcpy(ctx->auth_key, key, keylen);

	return 0;
}
1642
1643static int _qcrypto_aead_encrypt_aes_cbc(struct aead_request *req)
1644{
1645 struct qcrypto_cipher_req_ctx *rctx;
1646 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1647 struct crypto_priv *cp = ctx->cp;
1648 struct crypto_stat *pstat;
1649
1650 pstat = &_qcrypto_stat[cp->pdev->id];
1651
1652#ifdef QCRYPTO_DEBUG
1653 dev_info(&cp->pdev->dev, "_qcrypto_aead_encrypt_aes_cbc: %p\n", req);
1654#endif
1655
1656 rctx = aead_request_ctx(req);
1657 rctx->aead = 1;
1658 rctx->alg = CIPHER_ALG_AES;
1659 rctx->dir = QCE_ENCRYPT;
1660 rctx->mode = QCE_MODE_CBC;
1661 rctx->iv = req->iv;
1662
1663 pstat->aead_sha1_aes_enc++;
1664 return _qcrypto_queue_req(cp, &req->base);
1665}
1666
1667static int _qcrypto_aead_decrypt_aes_cbc(struct aead_request *req)
1668{
1669 struct qcrypto_cipher_req_ctx *rctx;
1670 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1671 struct crypto_priv *cp = ctx->cp;
1672 struct crypto_stat *pstat;
1673
1674 pstat = &_qcrypto_stat[cp->pdev->id];
1675
1676#ifdef QCRYPTO_DEBUG
1677 dev_info(&cp->pdev->dev, "_qcrypto_aead_decrypt_aes_cbc: %p\n", req);
1678#endif
1679 rctx = aead_request_ctx(req);
1680 rctx->aead = 1;
1681 rctx->alg = CIPHER_ALG_AES;
1682 rctx->dir = QCE_DECRYPT;
1683 rctx->mode = QCE_MODE_CBC;
1684 rctx->iv = req->iv;
1685
1686 pstat->aead_sha1_aes_dec++;
1687 return _qcrypto_queue_req(cp, &req->base);
1688}
1689
1690static int _qcrypto_aead_givencrypt_aes_cbc(struct aead_givcrypt_request *req)
1691{
1692 struct aead_request *areq = &req->areq;
1693 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1694 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
1695 struct crypto_priv *cp = ctx->cp;
1696 struct qcrypto_cipher_req_ctx *rctx;
1697 struct crypto_stat *pstat;
1698
1699 pstat = &_qcrypto_stat[cp->pdev->id];
1700
1701 rctx = aead_request_ctx(areq);
1702 rctx->aead = 1;
1703 rctx->alg = CIPHER_ALG_AES;
1704 rctx->dir = QCE_ENCRYPT;
1705 rctx->mode = QCE_MODE_CBC;
1706 rctx->iv = req->giv; /* generated iv */
1707
1708 memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
1709 /* avoid consecutive packets going out with same IV */
1710 *(__be64 *)req->giv ^= cpu_to_be64(req->seq);
1711 pstat->aead_sha1_aes_enc++;
1712 return _qcrypto_queue_req(cp, &areq->base);
1713}
1714
1715#ifdef QCRYPTO_AEAD_AES_CTR
1716static int _qcrypto_aead_encrypt_aes_ctr(struct aead_request *req)
1717{
1718 struct qcrypto_cipher_req_ctx *rctx;
1719 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1720 struct crypto_priv *cp = ctx->cp;
1721 struct crypto_stat *pstat;
1722
1723 pstat = &_qcrypto_stat[cp->pdev->id];
1724
1725 rctx = aead_request_ctx(req);
1726 rctx->aead = 1;
1727 rctx->alg = CIPHER_ALG_AES;
1728 rctx->dir = QCE_ENCRYPT;
1729 rctx->mode = QCE_MODE_CTR;
1730 rctx->iv = req->iv;
1731
1732 pstat->aead_sha1_aes_enc++;
1733 return _qcrypto_queue_req(cp, &req->base);
1734}
1735
1736static int _qcrypto_aead_decrypt_aes_ctr(struct aead_request *req)
1737{
1738 struct qcrypto_cipher_req_ctx *rctx;
1739 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1740 struct crypto_priv *cp = ctx->cp;
1741 struct crypto_stat *pstat;
1742
1743 pstat = &_qcrypto_stat[cp->pdev->id];
1744
1745 rctx = aead_request_ctx(req);
1746 rctx->aead = 1;
1747 rctx->alg = CIPHER_ALG_AES;
1748
1749 /* Note. There is no such thing as aes/counter mode, decrypt */
1750 rctx->dir = QCE_ENCRYPT;
1751
1752 rctx->mode = QCE_MODE_CTR;
1753 rctx->iv = req->iv;
1754
1755 pstat->aead_sha1_aes_dec++;
1756 return _qcrypto_queue_req(cp, &req->base);
1757}
1758
1759static int _qcrypto_aead_givencrypt_aes_ctr(struct aead_givcrypt_request *req)
1760{
1761 struct aead_request *areq = &req->areq;
1762 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1763 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
1764 struct crypto_priv *cp = ctx->cp;
1765 struct qcrypto_cipher_req_ctx *rctx;
1766 struct crypto_stat *pstat;
1767
1768 pstat = &_qcrypto_stat[cp->pdev->id];
1769
1770 rctx = aead_request_ctx(areq);
1771 rctx->aead = 1;
1772 rctx->alg = CIPHER_ALG_AES;
1773 rctx->dir = QCE_ENCRYPT;
1774 rctx->mode = QCE_MODE_CTR;
1775 rctx->iv = req->giv; /* generated iv */
1776
1777 memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
1778 /* avoid consecutive packets going out with same IV */
1779 *(__be64 *)req->giv ^= cpu_to_be64(req->seq);
1780 pstat->aead_sha1_aes_enc++;
1781 return _qcrypto_queue_req(cp, &areq->base);
1782};
1783#endif /* QCRYPTO_AEAD_AES_CTR */
1784
1785static int _qcrypto_aead_encrypt_des_cbc(struct aead_request *req)
1786{
1787 struct qcrypto_cipher_req_ctx *rctx;
1788 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1789 struct crypto_priv *cp = ctx->cp;
1790 struct crypto_stat *pstat;
1791
1792 pstat = &_qcrypto_stat[cp->pdev->id];
1793
1794 rctx = aead_request_ctx(req);
1795 rctx->aead = 1;
1796 rctx->alg = CIPHER_ALG_DES;
1797 rctx->dir = QCE_ENCRYPT;
1798 rctx->mode = QCE_MODE_CBC;
1799 rctx->iv = req->iv;
1800
1801 pstat->aead_sha1_des_enc++;
1802 return _qcrypto_queue_req(cp, &req->base);
1803}
1804
1805static int _qcrypto_aead_decrypt_des_cbc(struct aead_request *req)
1806{
1807 struct qcrypto_cipher_req_ctx *rctx;
1808 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1809 struct crypto_priv *cp = ctx->cp;
1810 struct crypto_stat *pstat;
1811
1812 pstat = &_qcrypto_stat[cp->pdev->id];
1813
1814 rctx = aead_request_ctx(req);
1815 rctx->aead = 1;
1816 rctx->alg = CIPHER_ALG_DES;
1817 rctx->dir = QCE_DECRYPT;
1818 rctx->mode = QCE_MODE_CBC;
1819 rctx->iv = req->iv;
1820
1821 pstat->aead_sha1_des_dec++;
1822 return _qcrypto_queue_req(cp, &req->base);
1823}
1824
1825static int _qcrypto_aead_givencrypt_des_cbc(struct aead_givcrypt_request *req)
1826{
1827 struct aead_request *areq = &req->areq;
1828 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1829 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
1830 struct crypto_priv *cp = ctx->cp;
1831 struct qcrypto_cipher_req_ctx *rctx;
1832 struct crypto_stat *pstat;
1833
1834 pstat = &_qcrypto_stat[cp->pdev->id];
1835
1836 rctx = aead_request_ctx(areq);
1837 rctx->aead = 1;
1838 rctx->alg = CIPHER_ALG_DES;
1839 rctx->dir = QCE_ENCRYPT;
1840 rctx->mode = QCE_MODE_CBC;
1841 rctx->iv = req->giv; /* generated iv */
1842
1843 memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
1844 /* avoid consecutive packets going out with same IV */
1845 *(__be64 *)req->giv ^= cpu_to_be64(req->seq);
1846 pstat->aead_sha1_des_enc++;
1847 return _qcrypto_queue_req(cp, &areq->base);
1848}
1849
1850static int _qcrypto_aead_encrypt_3des_cbc(struct aead_request *req)
1851{
1852 struct qcrypto_cipher_req_ctx *rctx;
1853 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1854 struct crypto_priv *cp = ctx->cp;
1855 struct crypto_stat *pstat;
1856
1857 pstat = &_qcrypto_stat[cp->pdev->id];
1858
1859 rctx = aead_request_ctx(req);
1860 rctx->aead = 1;
1861 rctx->alg = CIPHER_ALG_3DES;
1862 rctx->dir = QCE_ENCRYPT;
1863 rctx->mode = QCE_MODE_CBC;
1864 rctx->iv = req->iv;
1865
1866 pstat->aead_sha1_3des_enc++;
1867 return _qcrypto_queue_req(cp, &req->base);
1868}
1869
1870static int _qcrypto_aead_decrypt_3des_cbc(struct aead_request *req)
1871{
1872 struct qcrypto_cipher_req_ctx *rctx;
1873 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1874 struct crypto_priv *cp = ctx->cp;
1875 struct crypto_stat *pstat;
1876
1877 pstat = &_qcrypto_stat[cp->pdev->id];
1878
1879 rctx = aead_request_ctx(req);
1880 rctx->aead = 1;
1881 rctx->alg = CIPHER_ALG_3DES;
1882 rctx->dir = QCE_DECRYPT;
1883 rctx->mode = QCE_MODE_CBC;
1884 rctx->iv = req->iv;
1885
1886 pstat->aead_sha1_3des_dec++;
1887 return _qcrypto_queue_req(cp, &req->base);
1888}
1889
1890static int _qcrypto_aead_givencrypt_3des_cbc(struct aead_givcrypt_request *req)
1891{
1892 struct aead_request *areq = &req->areq;
1893 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1894 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
1895 struct crypto_priv *cp = ctx->cp;
1896 struct qcrypto_cipher_req_ctx *rctx;
1897 struct crypto_stat *pstat;
1898
1899 pstat = &_qcrypto_stat[cp->pdev->id];
1900
1901 rctx = aead_request_ctx(areq);
1902 rctx->aead = 1;
1903 rctx->alg = CIPHER_ALG_3DES;
1904 rctx->dir = QCE_ENCRYPT;
1905 rctx->mode = QCE_MODE_CBC;
1906 rctx->iv = req->giv; /* generated iv */
1907
1908 memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
1909 /* avoid consecutive packets going out with same IV */
1910 *(__be64 *)req->giv ^= cpu_to_be64(req->seq);
1911 pstat->aead_sha1_3des_enc++;
1912 return _qcrypto_queue_req(cp, &areq->base);
1913}
1914
1915static int qcrypto_count_sg(struct scatterlist *sg, int nbytes)
1916{
1917 int i;
1918
1919 for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
1920 nbytes -= sg->length;
1921
1922 return i;
1923}
1924
1925static int _sha_init(struct qcrypto_sha_ctx *ctx)
1926{
1927 ctx->first_blk = 1;
1928 ctx->last_blk = 0;
1929 ctx->byte_count[0] = 0;
1930 ctx->byte_count[1] = 0;
1931 ctx->byte_count[2] = 0;
1932 ctx->byte_count[3] = 0;
1933 ctx->trailing_buf_len = 0;
1934
1935 return 0;
1936};
1937
1938static int _sha1_init(struct ahash_request *req)
1939{
1940 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
1941 struct crypto_priv *cp = sha_ctx->cp;
1942 struct crypto_stat *pstat;
1943
1944 pstat = &_qcrypto_stat[cp->pdev->id];
1945
1946 _sha_init(sha_ctx);
1947 sha_ctx->alg = QCE_HASH_SHA1;
1948
1949 memset(&sha_ctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE);
1950 memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
1951 SHA1_DIGEST_SIZE);
1952 sha_ctx->diglen = SHA1_DIGEST_SIZE;
1953 _update_sha1_ctx(req);
1954
1955 pstat->sha1_digest++;
1956 return 0;
1957};
1958
1959static int _sha256_init(struct ahash_request *req)
1960{
1961 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
1962 struct crypto_priv *cp = sha_ctx->cp;
1963 struct crypto_stat *pstat;
1964
1965 pstat = &_qcrypto_stat[cp->pdev->id];
1966
1967 _sha_init(sha_ctx);
1968 sha_ctx->alg = QCE_HASH_SHA256;
1969
1970 memset(&sha_ctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE);
1971 memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
1972 SHA256_DIGEST_SIZE);
1973 sha_ctx->diglen = SHA256_DIGEST_SIZE;
1974 _update_sha256_ctx(req);
1975
1976 pstat->sha256_digest++;
1977 return 0;
1978};
1979
1980
1981static int _sha1_export(struct ahash_request *req, void *out)
1982{
1983 struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
1984 struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx;
1985 struct sha1_state *out_ctx = (struct sha1_state *)out;
1986
1987 out_ctx->count = sha_state_ctx->count;
1988 memcpy(out_ctx->state, sha_state_ctx->state, sizeof(out_ctx->state));
1989 memcpy(out_ctx->buffer, sha_state_ctx->buffer, SHA1_BLOCK_SIZE);
1990
1991 return 0;
1992};
1993
/*
 * Restore SHA-1 state saved by _sha1_export() into both the generic
 * request-context state and the driver's tfm-level bookkeeping.
 */
static int _sha1_import(struct ahash_request *req, const void *in)
{
	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx;
	struct sha1_state *in_ctx = (struct sha1_state *)in;

	sha_state_ctx->count = in_ctx->count;
	memcpy(sha_state_ctx->state, in_ctx->state, sizeof(in_ctx->state));
	memcpy(sha_state_ctx->buffer, in_ctx->buffer, SHA1_BLOCK_SIZE);
	memcpy(sha_ctx->trailing_buf, in_ctx->buffer, SHA1_BLOCK_SIZE);

	/* byte_count keeps only whole hashed blocks (count rounded down
	 * to a block multiple, split into low/high 32-bit words) */
	sha_ctx->byte_count[0] = (uint32_t)(in_ctx->count & 0xFFFFFFC0);
	sha_ctx->byte_count[1] = (uint32_t)(in_ctx->count >> 32);
	_words_to_byte_stream(in_ctx->state, sha_ctx->digest, sha_ctx->diglen);

	/* residue not yet hashed lives in trailing_buf */
	sha_ctx->trailing_buf_len = (uint32_t)(in_ctx->count &
						(SHA1_BLOCK_SIZE-1));

	/* a zero count means no data has been consumed yet */
	if (!(in_ctx->count))
		sha_ctx->first_blk = 1;
	else
		sha_ctx->first_blk = 0;

	return 0;
}
2020static int _sha256_export(struct ahash_request *req, void *out)
2021{
2022 struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
2023 struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx;
2024 struct sha256_state *out_ctx = (struct sha256_state *)out;
2025
2026 out_ctx->count = sha_state_ctx->count;
2027 memcpy(out_ctx->state, sha_state_ctx->state, sizeof(out_ctx->state));
2028 memcpy(out_ctx->buf, sha_state_ctx->buf, SHA256_BLOCK_SIZE);
2029
2030 return 0;
2031};
2032
/*
 * Restore SHA-256 state saved by _sha256_export() into both the
 * generic request-context state and the driver's tfm-level bookkeeping.
 */
static int _sha256_import(struct ahash_request *req, const void *in)
{
	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx;
	struct sha256_state *in_ctx = (struct sha256_state *)in;

	sha_state_ctx->count = in_ctx->count;
	memcpy(sha_state_ctx->state, in_ctx->state, sizeof(in_ctx->state));
	memcpy(sha_state_ctx->buf, in_ctx->buf, SHA256_BLOCK_SIZE);
	memcpy(sha_ctx->trailing_buf, in_ctx->buf, SHA256_BLOCK_SIZE);

	/* byte_count keeps only whole hashed blocks (count rounded down
	 * to a block multiple, split into low/high 32-bit words) */
	sha_ctx->byte_count[0] = (uint32_t)(in_ctx->count & 0xFFFFFFC0);
	sha_ctx->byte_count[1] = (uint32_t)(in_ctx->count >> 32);
	_words_to_byte_stream(in_ctx->state, sha_ctx->digest, sha_ctx->diglen);

	/* residue not yet hashed lives in trailing_buf */
	sha_ctx->trailing_buf_len = (uint32_t)(in_ctx->count &
						(SHA256_BLOCK_SIZE-1));

	/* a zero count means no data has been consumed yet */
	if (!(in_ctx->count))
		sha_ctx->first_blk = 1;
	else
		sha_ctx->first_blk = 0;

	return 0;
}
2059
2060
2061static int _sha_update(struct ahash_request *req, uint32_t sha_block_size)
2062{
2063 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2064 struct crypto_priv *cp = sha_ctx->cp;
2065 struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
2066 uint32_t total, len, i, num_sg;
2067 uint8_t *k_src = NULL;
2068 uint32_t sha_pad_len = 0;
2069 uint32_t end_src = 0;
2070 uint32_t trailing_buf_len = 0;
2071 uint32_t nbytes, index = 0;
2072 uint32_t saved_length = 0;
2073 int ret = 0;
2074
2075 /* check for trailing buffer from previous updates and append it */
2076 total = req->nbytes + sha_ctx->trailing_buf_len;
2077 len = req->nbytes;
2078
2079 if (total <= sha_block_size) {
2080 i = 0;
2081
2082 k_src = &sha_ctx->trailing_buf[sha_ctx->trailing_buf_len];
2083 while (len > 0) {
2084 memcpy(k_src, sg_virt(&req->src[i]),
2085 req->src[i].length);
2086 len -= req->src[i].length;
2087 k_src += req->src[i].length;
2088 i++;
2089 }
2090 sha_ctx->trailing_buf_len = total;
2091 if (sha_ctx->alg == QCE_HASH_SHA1)
2092 _update_sha1_ctx(req);
2093 if (sha_ctx->alg == QCE_HASH_SHA256)
2094 _update_sha256_ctx(req);
2095 return 0;
2096 }
2097
2098 /* save the original req structure fields*/
2099 rctx->src = req->src;
2100 rctx->nbytes = req->nbytes;
2101
2102 memcpy(sha_ctx->tmp_tbuf, sha_ctx->trailing_buf,
2103 sha_ctx->trailing_buf_len);
2104 k_src = &sha_ctx->trailing_buf[0];
2105 /* get new trailing buffer */
2106 sha_pad_len = ALIGN(total, sha_block_size) - total;
2107 trailing_buf_len = sha_block_size - sha_pad_len;
2108 nbytes = total - trailing_buf_len;
2109 num_sg = qcrypto_count_sg(req->src, req->nbytes);
2110
2111 len = sha_ctx->trailing_buf_len;
2112 i = 0;
2113
2114 while (len < nbytes) {
2115 if ((len + req->src[i].length) > nbytes)
2116 break;
2117 len += req->src[i].length;
2118 i++;
2119 }
2120
2121 end_src = i;
2122 if (len < nbytes) {
2123 uint32_t remnant = (nbytes - len);
2124 memcpy(k_src, (sg_virt(&req->src[i]) + remnant),
2125 (req->src[i].length - remnant));
2126 k_src += (req->src[i].length - remnant);
2127 saved_length = req->src[i].length;
2128 index = i;
2129 req->src[i].length = remnant;
2130 i++;
2131 }
2132
2133 while (i < num_sg) {
2134 memcpy(k_src, sg_virt(&req->src[i]), req->src[i].length);
2135 k_src += req->src[i].length;
2136 i++;
2137 }
2138
2139 if (sha_ctx->trailing_buf_len) {
2140 num_sg = end_src + 2;
2141 sha_ctx->sg = kzalloc(num_sg * (sizeof(struct scatterlist)),
2142 GFP_KERNEL);
2143 if (sha_ctx->sg == NULL) {
2144 pr_err("qcrypto Can't Allocate mem: sha_ctx->sg, error %ld\n",
2145 PTR_ERR(sha_ctx->sg));
2146 return -ENOMEM;
2147 }
2148
2149 sg_set_buf(&sha_ctx->sg[0], sha_ctx->tmp_tbuf,
2150 sha_ctx->trailing_buf_len);
2151 for (i = 1; i < num_sg; i++)
2152 sg_set_buf(&sha_ctx->sg[i], sg_virt(&req->src[i-1]),
2153 req->src[i-1].length);
2154
2155 req->src = sha_ctx->sg;
2156 sg_mark_end(&sha_ctx->sg[num_sg - 1]);
2157 } else
2158 sg_mark_end(&req->src[end_src]);
2159
2160 req->nbytes = nbytes;
2161 if (saved_length > 0)
2162 rctx->src[index].length = saved_length;
2163 sha_ctx->trailing_buf_len = trailing_buf_len;
2164
2165 ret = _qcrypto_queue_req(cp, &req->base);
2166
2167 return ret;
2168};
2169
2170static int _sha1_update(struct ahash_request *req)
2171{
2172 struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
2173 struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx;
2174
2175 sha_state_ctx->count += req->nbytes;
2176 return _sha_update(req, SHA1_BLOCK_SIZE);
2177}
2178
2179static int _sha256_update(struct ahash_request *req)
2180{
2181 struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
2182 struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx;
2183
2184 sha_state_ctx->count += req->nbytes;
2185 return _sha_update(req, SHA256_BLOCK_SIZE);
2186}
2187
2188static int _sha_final(struct ahash_request *req, uint32_t sha_block_size)
2189{
2190 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2191 struct crypto_priv *cp = sha_ctx->cp;
2192 struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
2193 int ret = 0;
2194
2195 sha_ctx->last_blk = 1;
2196
2197 /* save the original req structure fields*/
2198 rctx->src = req->src;
2199 rctx->nbytes = req->nbytes;
2200
2201 sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->trailing_buf,
2202 sha_ctx->trailing_buf_len);
2203 sg_mark_end(&sha_ctx->tmp_sg);
2204
2205 req->src = &sha_ctx->tmp_sg;
2206 req->nbytes = sha_ctx->trailing_buf_len;
2207
2208 ret = _qcrypto_queue_req(cp, &req->base);
2209
2210 return ret;
2211};
2212
/* Finalize a plain SHA-1 request through the common final path. */
static int _sha1_final(struct ahash_request *req)
{
	return _sha_final(req, SHA1_BLOCK_SIZE);
}
2217
/* Finalize a plain SHA-256 request through the common final path. */
static int _sha256_final(struct ahash_request *req)
{
	return _sha_final(req, SHA256_BLOCK_SIZE);
}
2222
2223static int _sha_digest(struct ahash_request *req)
2224{
2225 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2226 struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
2227 struct crypto_priv *cp = sha_ctx->cp;
2228 int ret = 0;
2229
2230 /* save the original req structure fields*/
2231 rctx->src = req->src;
2232 rctx->nbytes = req->nbytes;
2233
2234 sha_ctx->last_blk = 1;
2235 ret = _qcrypto_queue_req(cp, &req->base);
2236
2237 return ret;
2238}
2239
/* One-shot SHA-1 digest: reset the hash state, then run the common path. */
static int _sha1_digest(struct ahash_request *req)
{
	_sha1_init(req);
	return _sha_digest(req);
}
2245
/* One-shot SHA-256 digest: reset the hash state, then run the common path. */
static int _sha256_digest(struct ahash_request *req)
{
	_sha256_init(req);
	return _sha_digest(req);
}
2251
2252static void _crypto_sha_hmac_ahash_req_complete(
2253 struct crypto_async_request *req, int err)
2254{
2255 struct completion *ahash_req_complete = req->data;
2256
2257 if (err == -EINPROGRESS)
2258 return;
2259 complete(ahash_req_complete);
2260}
2261
2262static int _sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
2263 unsigned int len)
2264{
2265 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
2266 int ret = 0;
2267
2268 sha_ctx->in_buf = kzalloc(len, GFP_KERNEL);
2269 if (sha_ctx->in_buf == NULL) {
2270 pr_err("qcrypto Can't Allocate mem: sha_ctx->in_buf, error %ld\n",
2271 PTR_ERR(sha_ctx->in_buf));
2272 return -ENOMEM;
2273 }
2274 memcpy(sha_ctx->in_buf, key, len);
2275 sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->in_buf, len);
2276 sg_mark_end(&sha_ctx->tmp_sg);
2277
2278 ahash_request_set_crypt(sha_ctx->ahash_req, &sha_ctx->tmp_sg,
2279 &sha_ctx->authkey[0], len);
2280
2281 ret = _sha_digest(sha_ctx->ahash_req);
2282 if (ret == -EINPROGRESS || ret == -EBUSY) {
2283 ret =
2284 wait_for_completion_interruptible(
2285 &sha_ctx->ahash_req_complete);
2286 INIT_COMPLETION(sha_ctx->ahash_req_complete);
2287 }
2288
2289 sha_ctx->authkey_in_len = len;
2290 kfree(sha_ctx->in_buf);
2291 sha_ctx->in_buf = NULL;
2292
2293 return ret;
2294}
2295
2296static int _sha1_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
2297 unsigned int len)
2298{
2299 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
2300
2301 if (len <= SHA1_BLOCK_SIZE)
2302 memcpy(&sha_ctx->authkey[0], key, len);
2303 else {
2304 _sha_init(sha_ctx);
2305 sha_ctx->alg = QCE_HASH_SHA1;
2306 memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
2307 SHA1_DIGEST_SIZE);
2308 sha_ctx->diglen = SHA1_DIGEST_SIZE;
2309 _sha_hmac_setkey(tfm, key, len);
2310 }
2311 return 0;
2312}
2313
2314static int _sha256_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
2315 unsigned int len)
2316{
2317 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
2318
2319 if (len <= SHA256_BLOCK_SIZE)
2320 memcpy(&sha_ctx->authkey[0], key, len);
2321 else {
2322 _sha_init(sha_ctx);
2323 sha_ctx->alg = QCE_HASH_SHA256;
2324 memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
2325 SHA256_DIGEST_SIZE);
2326 sha_ctx->diglen = SHA256_DIGEST_SIZE;
2327 _sha_hmac_setkey(tfm, key, len);
2328 }
2329
2330 return 0;
2331}
2332
2333static int _sha_hmac_init_ihash(struct ahash_request *req,
2334 uint32_t sha_block_size)
2335{
2336 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2337 int i;
2338
2339 for (i = 0; i < sha_block_size; i++)
2340 sha_ctx->trailing_buf[i] = sha_ctx->authkey[i] ^ 0x36;
2341 sha_ctx->trailing_buf_len = sha_block_size;
2342
2343 return 0;
2344}
2345
2346static int _sha1_hmac_init(struct ahash_request *req)
2347{
2348 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2349 struct crypto_priv *cp = sha_ctx->cp;
2350 struct crypto_stat *pstat;
2351 int ret = 0;
2352
2353 pstat = &_qcrypto_stat[cp->pdev->id];
2354 pstat->sha1_hmac_digest++;
2355
2356 _sha_init(sha_ctx);
2357 memset(&sha_ctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE);
2358 memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
2359 SHA1_DIGEST_SIZE);
2360 sha_ctx->diglen = SHA1_DIGEST_SIZE;
2361 _update_sha1_ctx(req);
2362
2363 if (cp->ce_support.sha_hmac)
2364 sha_ctx->alg = QCE_HASH_SHA1_HMAC;
2365 else {
2366 sha_ctx->alg = QCE_HASH_SHA1;
2367 ret = _sha_hmac_init_ihash(req, SHA1_BLOCK_SIZE);
2368 }
2369
2370 return ret;
2371}
2372
2373static int _sha256_hmac_init(struct ahash_request *req)
2374{
2375 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2376 struct crypto_priv *cp = sha_ctx->cp;
2377 struct crypto_stat *pstat;
2378 int ret = 0;
2379
2380 pstat = &_qcrypto_stat[cp->pdev->id];
2381 pstat->sha256_hmac_digest++;
2382
2383 _sha_init(sha_ctx);
2384 memset(&sha_ctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE);
2385 memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
2386 SHA256_DIGEST_SIZE);
2387 sha_ctx->diglen = SHA256_DIGEST_SIZE;
2388 _update_sha256_ctx(req);
2389
2390 if (cp->ce_support.sha_hmac)
2391 sha_ctx->alg = QCE_HASH_SHA256_HMAC;
2392 else {
2393 sha_ctx->alg = QCE_HASH_SHA256;
2394 ret = _sha_hmac_init_ihash(req, SHA256_BLOCK_SIZE);
2395 }
2396
2397 return ret;
2398}
2399
/* HMAC update is identical to the plain SHA-1 update path. */
static int _sha1_hmac_update(struct ahash_request *req)
{
	return _sha1_update(req);
}
2404
/* HMAC update is identical to the plain SHA-256 update path. */
static int _sha256_hmac_update(struct ahash_request *req)
{
	return _sha256_update(req);
}
2409
/*
 * Software HMAC outer hash: computes H((K ^ opad) || inner_digest) per
 * RFC 2104.  The opad-XORed key and the inner digest are staged
 * contiguously in tmp_tbuf, the hash state is re-seeded with the
 * standard IV, and the request is queued as a final block.
 */
static int _sha_hmac_outer_hash(struct ahash_request *req,
		uint32_t sha_digest_size, uint32_t sha_block_size)
{
	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_priv *cp = sha_ctx->cp;
	int i;

	/* K ^ 0x5c (opad) */
	for (i = 0; i < sha_block_size; i++)
		sha_ctx->tmp_tbuf[i] = sha_ctx->authkey[i] ^ 0x5c;

	/* save the original req structure fields*/
	rctx->src = req->src;
	rctx->nbytes = req->nbytes;

	/* append the inner digest right after the opad block;
	 * must happen before _sha_init() overwrites the digest below */
	memcpy(&sha_ctx->tmp_tbuf[sha_block_size], &sha_ctx->digest[0],
						sha_digest_size);

	sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->tmp_tbuf, sha_block_size +
							sha_digest_size);
	sg_mark_end(&sha_ctx->tmp_sg);
	req->src = &sha_ctx->tmp_sg;
	req->nbytes = sha_block_size + sha_digest_size;

	/* restart the hash state with the standard IV for the outer hash */
	_sha_init(sha_ctx);
	if (sha_ctx->alg == QCE_HASH_SHA1) {
		memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
							SHA1_DIGEST_SIZE);
		sha_ctx->diglen = SHA1_DIGEST_SIZE;
	} else {
		memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
							SHA256_DIGEST_SIZE);
		sha_ctx->diglen = SHA256_DIGEST_SIZE;
	}

	sha_ctx->last_blk = 1;
	return _qcrypto_queue_req(cp, &req->base);
}
2448
2449static int _sha_hmac_inner_hash(struct ahash_request *req,
2450 uint32_t sha_digest_size, uint32_t sha_block_size)
2451{
2452 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2453 struct ahash_request *areq = sha_ctx->ahash_req;
2454 struct crypto_priv *cp = sha_ctx->cp;
2455 int ret = 0;
2456
2457 sha_ctx->last_blk = 1;
2458
2459 sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->trailing_buf,
2460 sha_ctx->trailing_buf_len);
2461 sg_mark_end(&sha_ctx->tmp_sg);
2462
2463 ahash_request_set_crypt(areq, &sha_ctx->tmp_sg, &sha_ctx->digest[0],
2464 sha_ctx->trailing_buf_len);
2465 sha_ctx->last_blk = 1;
2466 ret = _qcrypto_queue_req(cp, &areq->base);
2467
2468 if (ret == -EINPROGRESS || ret == -EBUSY) {
2469 ret =
2470 wait_for_completion_interruptible(&sha_ctx->ahash_req_complete);
2471 INIT_COMPLETION(sha_ctx->ahash_req_complete);
2472 }
2473
2474 return ret;
2475}
2476
2477static int _sha1_hmac_final(struct ahash_request *req)
2478{
2479 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2480 struct crypto_priv *cp = sha_ctx->cp;
2481 int ret = 0;
2482
2483 if (cp->ce_support.sha_hmac)
2484 return _sha_final(req, SHA1_BLOCK_SIZE);
2485 else {
2486 ret = _sha_hmac_inner_hash(req, SHA1_DIGEST_SIZE,
2487 SHA1_BLOCK_SIZE);
2488 if (ret)
2489 return ret;
2490 return _sha_hmac_outer_hash(req, SHA1_DIGEST_SIZE,
2491 SHA1_BLOCK_SIZE);
2492 }
2493}
2494
2495static int _sha256_hmac_final(struct ahash_request *req)
2496{
2497 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2498 struct crypto_priv *cp = sha_ctx->cp;
2499 int ret = 0;
2500
2501 if (cp->ce_support.sha_hmac)
2502 return _sha_final(req, SHA256_BLOCK_SIZE);
2503 else {
2504 ret = _sha_hmac_inner_hash(req, SHA256_DIGEST_SIZE,
2505 SHA256_BLOCK_SIZE);
2506 if (ret)
2507 return ret;
2508 return _sha_hmac_outer_hash(req, SHA256_DIGEST_SIZE,
2509 SHA256_BLOCK_SIZE);
2510 }
2511 return 0;
2512}
2513
2514
2515static int _sha1_hmac_digest(struct ahash_request *req)
2516{
2517 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2518 struct crypto_priv *cp = sha_ctx->cp;
2519 struct crypto_stat *pstat;
2520
2521 pstat = &_qcrypto_stat[cp->pdev->id];
2522 pstat->sha1_hmac_digest++;
2523
2524 _sha_init(sha_ctx);
2525 memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
2526 SHA1_DIGEST_SIZE);
2527 sha_ctx->diglen = SHA1_DIGEST_SIZE;
2528 sha_ctx->alg = QCE_HASH_SHA1_HMAC;
2529
2530 return _sha_digest(req);
2531}
2532
2533static int _sha256_hmac_digest(struct ahash_request *req)
2534{
2535 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2536 struct crypto_priv *cp = sha_ctx->cp;
2537 struct crypto_stat *pstat;
2538
2539 pstat = &_qcrypto_stat[cp->pdev->id];
2540 pstat->sha256_hmac_digest++;
2541
2542 _sha_init(sha_ctx);
2543 memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
2544 SHA256_DIGEST_SIZE);
2545 sha_ctx->diglen = SHA256_DIGEST_SIZE;
2546 sha_ctx->alg = QCE_HASH_SHA256_HMAC;
2547
2548 return _sha_digest(req);
2549}
2550
/*
 * Plain (unkeyed) SHA-1 / SHA-256 ahash algorithms exported by this
 * driver.  Registered unconditionally at probe time.
 */
static struct ahash_alg _qcrypto_ahash_algos[] = {
	{	/* sha1 */
		.init		=	_sha1_init,
		.update		=	_sha1_update,
		.final		=	_sha1_final,
		.export		=	_sha1_export,
		.import		=	_sha1_import,
		.digest		=	_sha1_digest,
		.halg		= {
			.digestsize	= SHA1_DIGEST_SIZE,
			.statesize	= sizeof(struct sha1_state),
			.base	= {
				.cra_name	 = "sha1",
				.cra_driver_name = "qcrypto-sha1",
				.cra_priority	 = 300,
				.cra_flags	 = CRYPTO_ALG_TYPE_AHASH |
							 CRYPTO_ALG_ASYNC,
				.cra_blocksize	 = SHA1_BLOCK_SIZE,
				.cra_ctxsize	 =
						sizeof(struct qcrypto_sha_ctx),
				.cra_alignmask	 = 0,
				.cra_type	 = &crypto_ahash_type,
				.cra_module	 = THIS_MODULE,
				.cra_init	 = _qcrypto_ahash_cra_init,
				.cra_exit	 = _qcrypto_ahash_cra_exit,
			},
		},
	},
	{	/* sha256 */
		.init		=	_sha256_init,
		.update		=	_sha256_update,
		.final		=	_sha256_final,
		.export		=	_sha256_export,
		.import		=	_sha256_import,
		.digest		=	_sha256_digest,
		.halg		= {
			.digestsize	= SHA256_DIGEST_SIZE,
			.statesize	= sizeof(struct sha256_state),
			.base	= {
				.cra_name	 = "sha256",
				.cra_driver_name = "qcrypto-sha256",
				.cra_priority	 = 300,
				.cra_flags	 = CRYPTO_ALG_TYPE_AHASH |
							 CRYPTO_ALG_ASYNC,
				.cra_blocksize	 = SHA256_BLOCK_SIZE,
				.cra_ctxsize	 =
						sizeof(struct qcrypto_sha_ctx),
				.cra_alignmask	 = 0,
				.cra_type	 = &crypto_ahash_type,
				.cra_module	 = THIS_MODULE,
				.cra_init	 = _qcrypto_ahash_cra_init,
				.cra_exit	 = _qcrypto_ahash_cra_exit,
			},
		},
	},
};
2607
/*
 * HMAC-SHA1 / HMAC-SHA256 ahash algorithms.  Registered only when the
 * CE core or platform data advertises sha_hmac support.
 */
static struct ahash_alg _qcrypto_sha_hmac_algos[] = {
	{	/* hmac(sha1) */
		.init		=	_sha1_hmac_init,
		.update		=	_sha1_hmac_update,
		.final		=	_sha1_hmac_final,
		.export		=	_sha1_export,
		.import		=	_sha1_import,
		.digest		=	_sha1_hmac_digest,
		.setkey		=	_sha1_hmac_setkey,
		.halg		= {
			.digestsize	= SHA1_DIGEST_SIZE,
			.statesize	= sizeof(struct sha1_state),
			.base	= {
				.cra_name	 = "hmac(sha1)",
				.cra_driver_name = "qcrypto-hmac-sha1",
				.cra_priority	 = 300,
				.cra_flags	 = CRYPTO_ALG_TYPE_AHASH |
							 CRYPTO_ALG_ASYNC,
				.cra_blocksize	 = SHA1_BLOCK_SIZE,
				.cra_ctxsize	 =
						sizeof(struct qcrypto_sha_ctx),
				.cra_alignmask	 = 0,
				.cra_type	 = &crypto_ahash_type,
				.cra_module	 = THIS_MODULE,
				.cra_init	 = _qcrypto_ahash_hmac_cra_init,
				.cra_exit	 = _qcrypto_ahash_cra_exit,
			},
		},
	},
	{	/* hmac(sha256) */
		.init		=	_sha256_hmac_init,
		.update		=	_sha256_hmac_update,
		.final		=	_sha256_hmac_final,
		.export		=	_sha256_export,
		.import		=	_sha256_import,
		.digest		=	_sha256_hmac_digest,
		.setkey		=	_sha256_hmac_setkey,
		.halg		= {
			.digestsize	= SHA256_DIGEST_SIZE,
			.statesize	= sizeof(struct sha256_state),
			.base	= {
				.cra_name	 = "hmac(sha256)",
				.cra_driver_name = "qcrypto-hmac-sha256",
				.cra_priority	 = 300,
				.cra_flags	 = CRYPTO_ALG_TYPE_AHASH |
							 CRYPTO_ALG_ASYNC,
				.cra_blocksize	 = SHA256_BLOCK_SIZE,
				.cra_ctxsize	 =
						sizeof(struct qcrypto_sha_ctx),
				.cra_alignmask	 = 0,
				.cra_type	 = &crypto_ahash_type,
				.cra_module	 = THIS_MODULE,
				.cra_init	 = _qcrypto_ahash_hmac_cra_init,
				.cra_exit	 = _qcrypto_ahash_cra_exit,
			},
		},
	},
};
2666
/*
 * Async block-cipher algorithms (AES/DES/3DES in ECB, CBC and AES-CTR)
 * exported by this driver.  Registered unconditionally at probe time;
 * ECB modes carry no IV, hence no .ivsize.
 */
static struct crypto_alg _qcrypto_ablk_cipher_algos[] = {
	{	/* ecb(aes) */
		.cra_name	= "ecb(aes)",
		.cra_driver_name = "qcrypto-ecb-aes",
		.cra_priority	= 300,
		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
		.cra_alignmask	= 0,
		.cra_type	= &crypto_ablkcipher_type,
		.cra_module	= THIS_MODULE,
		.cra_init	= _qcrypto_cra_ablkcipher_init,
		.cra_u		= {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.setkey		= _qcrypto_setkey_aes,
				.encrypt	= _qcrypto_enc_aes_ecb,
				.decrypt	= _qcrypto_dec_aes_ecb,
			},
		},
	},
	{	/* cbc(aes) */
		.cra_name	= "cbc(aes)",
		.cra_driver_name = "qcrypto-cbc-aes",
		.cra_priority	= 300,
		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
		.cra_alignmask	= 0,
		.cra_type	= &crypto_ablkcipher_type,
		.cra_module	= THIS_MODULE,
		.cra_init	= _qcrypto_cra_ablkcipher_init,
		.cra_u		= {
			.ablkcipher = {
				.ivsize		= AES_BLOCK_SIZE,
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.setkey		= _qcrypto_setkey_aes,
				.encrypt	= _qcrypto_enc_aes_cbc,
				.decrypt	= _qcrypto_dec_aes_cbc,
			},
		},
	},
	{	/* ctr(aes) */
		.cra_name	= "ctr(aes)",
		.cra_driver_name = "qcrypto-ctr-aes",
		.cra_priority	= 300,
		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
		.cra_alignmask	= 0,
		.cra_type	= &crypto_ablkcipher_type,
		.cra_module	= THIS_MODULE,
		.cra_init	= _qcrypto_cra_ablkcipher_init,
		.cra_u		= {
			.ablkcipher = {
				.ivsize		= AES_BLOCK_SIZE,
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.setkey		= _qcrypto_setkey_aes,
				.encrypt	= _qcrypto_enc_aes_ctr,
				.decrypt	= _qcrypto_dec_aes_ctr,
			},
		},
	},
	{	/* ecb(des) */
		.cra_name	= "ecb(des)",
		.cra_driver_name = "qcrypto-ecb-des",
		.cra_priority	= 300,
		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
		.cra_alignmask	= 0,
		.cra_type	= &crypto_ablkcipher_type,
		.cra_module	= THIS_MODULE,
		.cra_init	= _qcrypto_cra_ablkcipher_init,
		.cra_u		= {
			.ablkcipher = {
				.min_keysize	= DES_KEY_SIZE,
				.max_keysize	= DES_KEY_SIZE,
				.setkey		= _qcrypto_setkey_des,
				.encrypt	= _qcrypto_enc_des_ecb,
				.decrypt	= _qcrypto_dec_des_ecb,
			},
		},
	},
	{	/* cbc(des) */
		.cra_name	= "cbc(des)",
		.cra_driver_name = "qcrypto-cbc-des",
		.cra_priority	= 300,
		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
		.cra_alignmask	= 0,
		.cra_type	= &crypto_ablkcipher_type,
		.cra_module	= THIS_MODULE,
		.cra_init	= _qcrypto_cra_ablkcipher_init,
		.cra_u		= {
			.ablkcipher = {
				.ivsize		= DES_BLOCK_SIZE,
				.min_keysize	= DES_KEY_SIZE,
				.max_keysize	= DES_KEY_SIZE,
				.setkey		= _qcrypto_setkey_des,
				.encrypt	= _qcrypto_enc_des_cbc,
				.decrypt	= _qcrypto_dec_des_cbc,
			},
		},
	},
	{	/* ecb(des3_ede) */
		.cra_name	= "ecb(des3_ede)",
		.cra_driver_name = "qcrypto-ecb-3des",
		.cra_priority	= 300,
		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
		.cra_alignmask	= 0,
		.cra_type	= &crypto_ablkcipher_type,
		.cra_module	= THIS_MODULE,
		.cra_init	= _qcrypto_cra_ablkcipher_init,
		.cra_u		= {
			.ablkcipher = {
				.min_keysize	= DES3_EDE_KEY_SIZE,
				.max_keysize	= DES3_EDE_KEY_SIZE,
				.setkey		= _qcrypto_setkey_3des,
				.encrypt	= _qcrypto_enc_3des_ecb,
				.decrypt	= _qcrypto_dec_3des_ecb,
			},
		},
	},
	{	/* cbc(des3_ede) */
		.cra_name	= "cbc(des3_ede)",
		.cra_driver_name = "qcrypto-cbc-3des",
		.cra_priority	= 300,
		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
		.cra_alignmask	= 0,
		.cra_type	= &crypto_ablkcipher_type,
		.cra_module	= THIS_MODULE,
		.cra_init	= _qcrypto_cra_ablkcipher_init,
		.cra_u		= {
			.ablkcipher = {
				.ivsize		= DES3_EDE_BLOCK_SIZE,
				.min_keysize	= DES3_EDE_KEY_SIZE,
				.max_keysize	= DES3_EDE_KEY_SIZE,
				.setkey		= _qcrypto_setkey_3des,
				.encrypt	= _qcrypto_enc_3des_cbc,
				.decrypt	= _qcrypto_dec_3des_cbc,
			},
		},
	},
};
2820
/*
 * AES-XTS algorithm.  Registered only when the CE core reports aes_xts
 * support.
 */
static struct crypto_alg _qcrypto_ablk_cipher_xts_algo = {
	.cra_name	= "xts(aes)",
	.cra_driver_name = "qcrypto-xts-aes",
	.cra_priority	= 300,
	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize	= AES_BLOCK_SIZE,
	.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
	.cra_alignmask	= 0,
	.cra_type	= &crypto_ablkcipher_type,
	.cra_module	= THIS_MODULE,
	.cra_init	= _qcrypto_cra_ablkcipher_init,
	.cra_u		= {
		.ablkcipher = {
			.ivsize		= AES_BLOCK_SIZE,
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= _qcrypto_setkey_aes,
			.encrypt	= _qcrypto_enc_aes_xts,
			.decrypt	= _qcrypto_dec_aes_xts,
		},
	},
};
2843
/*
 * AEAD authenc(hmac(sha1), <cipher>) algorithms.  Registered only when
 * the CE core reports sha1_hmac or sha1_hmac_20 support.  The AES-CTR
 * variant is compiled in only under QCRYPTO_AEAD_AES_CTR.
 */
static struct crypto_alg _qcrypto_aead_sha1_hmac_algos[] = {
	{	/* authenc(hmac(sha1),cbc(aes)) */
		.cra_name	= "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-aes",
		.cra_priority	= 300,
		.cra_flags	= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
		.cra_blocksize  = AES_BLOCK_SIZE,
		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
		.cra_alignmask	= 0,
		.cra_type	= &crypto_aead_type,
		.cra_module	= THIS_MODULE,
		.cra_init	= _qcrypto_cra_aead_init,
		.cra_u		= {
			.aead = {
				.ivsize         = AES_BLOCK_SIZE,
				.maxauthsize    = SHA1_DIGEST_SIZE,
				.setkey = _qcrypto_aead_setkey,
				.setauthsize = _qcrypto_aead_setauthsize,
				.encrypt = _qcrypto_aead_encrypt_aes_cbc,
				.decrypt = _qcrypto_aead_decrypt_aes_cbc,
				.givencrypt = _qcrypto_aead_givencrypt_aes_cbc,
				.geniv = "<built-in>",
			}
		}
	},

#ifdef QCRYPTO_AEAD_AES_CTR
	{	/* authenc(hmac(sha1),ctr(aes)) */
		.cra_name	= "authenc(hmac(sha1),ctr(aes))",
		.cra_driver_name = "qcrypto-aead-hmac-sha1-ctr-aes",
		.cra_priority	= 300,
		.cra_flags	= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
		.cra_blocksize  = AES_BLOCK_SIZE,
		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
		.cra_alignmask	= 0,
		.cra_type	= &crypto_aead_type,
		.cra_module	= THIS_MODULE,
		.cra_init	= _qcrypto_cra_aead_init,
		.cra_u		= {
			.aead = {
				.ivsize         = AES_BLOCK_SIZE,
				.maxauthsize    = SHA1_DIGEST_SIZE,
				.setkey = _qcrypto_aead_setkey,
				.setauthsize = _qcrypto_aead_setauthsize,
				.encrypt = _qcrypto_aead_encrypt_aes_ctr,
				.decrypt = _qcrypto_aead_decrypt_aes_ctr,
				.givencrypt = _qcrypto_aead_givencrypt_aes_ctr,
				.geniv = "<built-in>",
			}
		}
	},
#endif /* QCRYPTO_AEAD_AES_CTR */
	{	/* authenc(hmac(sha1),cbc(des)) */
		.cra_name	= "authenc(hmac(sha1),cbc(des))",
		.cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-des",
		.cra_priority	= 300,
		.cra_flags	= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
		.cra_blocksize  = DES_BLOCK_SIZE,
		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
		.cra_alignmask	= 0,
		.cra_type	= &crypto_aead_type,
		.cra_module	= THIS_MODULE,
		.cra_init	= _qcrypto_cra_aead_init,
		.cra_u		= {
			.aead = {
				.ivsize         = DES_BLOCK_SIZE,
				.maxauthsize    = SHA1_DIGEST_SIZE,
				.setkey = _qcrypto_aead_setkey,
				.setauthsize = _qcrypto_aead_setauthsize,
				.encrypt = _qcrypto_aead_encrypt_des_cbc,
				.decrypt = _qcrypto_aead_decrypt_des_cbc,
				.givencrypt = _qcrypto_aead_givencrypt_des_cbc,
				.geniv = "<built-in>",
			}
		}
	},
	{	/* authenc(hmac(sha1),cbc(des3_ede)) */
		.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
		.cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-3des",
		.cra_priority	= 300,
		.cra_flags	= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
		.cra_blocksize  = DES3_EDE_BLOCK_SIZE,
		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
		.cra_alignmask	= 0,
		.cra_type	= &crypto_aead_type,
		.cra_module	= THIS_MODULE,
		.cra_init	= _qcrypto_cra_aead_init,
		.cra_u		= {
			.aead = {
				.ivsize         = DES3_EDE_BLOCK_SIZE,
				.maxauthsize    = SHA1_DIGEST_SIZE,
				.setkey = _qcrypto_aead_setkey,
				.setauthsize = _qcrypto_aead_setauthsize,
				.encrypt = _qcrypto_aead_encrypt_3des_cbc,
				.decrypt = _qcrypto_aead_decrypt_3des_cbc,
				.givencrypt = _qcrypto_aead_givencrypt_3des_cbc,
				.geniv = "<built-in>",
			}
		}
	},
};
2945
2946static struct crypto_alg _qcrypto_aead_ccm_algo = {
2947 .cra_name = "ccm(aes)",
2948 .cra_driver_name = "qcrypto-aes-ccm",
2949 .cra_priority = 300,
2950 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2951 .cra_blocksize = AES_BLOCK_SIZE,
2952 .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
2953 .cra_alignmask = 0,
2954 .cra_type = &crypto_aead_type,
2955 .cra_module = THIS_MODULE,
2956 .cra_init = _qcrypto_cra_aead_init,
2957 .cra_u = {
2958 .aead = {
2959 .ivsize = AES_BLOCK_SIZE,
2960 .maxauthsize = SHA1_DIGEST_SIZE,
2961 .setkey = _qcrypto_aead_ccm_setkey,
2962 .setauthsize = _qcrypto_aead_ccm_setauthsize,
2963 .encrypt = _qcrypto_aead_encrypt_aes_ccm,
2964 .decrypt = _qcrypto_aead_decrypt_aes_ccm,
2965 .geniv = "<built-in>",
2966 }
2967 }
2968};
2969
2970
2971static int _qcrypto_probe(struct platform_device *pdev)
2972{
2973 int rc = 0;
2974 void *handle;
2975 struct crypto_priv *cp;
2976 int i;
2977 struct msm_ce_hw_support *platform_support;
2978
2979 if (pdev->id >= MAX_CRYPTO_DEVICE) {
Ramesh Masavarapuc1d2b682011-09-07 14:57:58 -07002980 pr_err("%s: device id %d exceeds allowed %d\n",
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002981 __func__, pdev->id, MAX_CRYPTO_DEVICE);
2982 return -ENOENT;
2983 }
2984
2985 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2986 if (!cp) {
2987 pr_err("qcrypto Memory allocation of q_alg FAIL, error %ld\n",
2988 PTR_ERR(cp));
2989 return -ENOMEM;
2990 }
2991
2992 /* open qce */
2993 handle = qce_open(pdev, &rc);
2994 if (handle == NULL) {
2995 kfree(cp);
2996 platform_set_drvdata(pdev, NULL);
2997 return rc;
2998 }
2999
3000 INIT_LIST_HEAD(&cp->alg_list);
3001 platform_set_drvdata(pdev, cp);
3002 spin_lock_init(&cp->lock);
3003 tasklet_init(&cp->done_tasklet, req_done, (unsigned long)cp);
3004 crypto_init_queue(&cp->queue, 50);
3005 cp->qce = handle;
3006 cp->pdev = pdev;
3007 qce_hw_support(cp->qce, &cp->ce_support);
3008 platform_support = (struct msm_ce_hw_support *)pdev->dev.platform_data;
3009 cp->platform_support.ce_shared = platform_support->ce_shared;
3010 cp->platform_support.shared_ce_resource =
3011 platform_support->shared_ce_resource;
3012 cp->platform_support.hw_key_support =
3013 platform_support->hw_key_support;
3014 cp->ce_lock_count = 0;
3015 cp->platform_support.sha_hmac = platform_support->sha_hmac;
3016
3017 if (cp->platform_support.ce_shared)
3018 INIT_WORK(&cp->unlock_ce_ws, qcrypto_unlock_ce);
3019
3020 /* register crypto cipher algorithms the device supports */
3021 for (i = 0; i < ARRAY_SIZE(_qcrypto_ablk_cipher_algos); i++) {
3022 struct qcrypto_alg *q_alg;
3023
3024 q_alg = _qcrypto_cipher_alg_alloc(cp,
3025 &_qcrypto_ablk_cipher_algos[i]);
3026 if (IS_ERR(q_alg)) {
3027 rc = PTR_ERR(q_alg);
3028 goto err;
3029 }
3030 rc = crypto_register_alg(&q_alg->cipher_alg);
3031 if (rc) {
3032 dev_err(&pdev->dev, "%s alg registration failed\n",
3033 q_alg->cipher_alg.cra_driver_name);
3034 kfree(q_alg);
3035 } else {
3036 list_add_tail(&q_alg->entry, &cp->alg_list);
3037 dev_info(&pdev->dev, "%s\n",
3038 q_alg->cipher_alg.cra_driver_name);
3039 }
3040 }
3041
3042 /* register crypto cipher algorithms the device supports */
3043 if (cp->ce_support.aes_xts) {
3044 struct qcrypto_alg *q_alg;
3045
3046 q_alg = _qcrypto_cipher_alg_alloc(cp,
3047 &_qcrypto_ablk_cipher_xts_algo);
3048 if (IS_ERR(q_alg)) {
3049 rc = PTR_ERR(q_alg);
3050 goto err;
3051 }
3052 rc = crypto_register_alg(&q_alg->cipher_alg);
3053 if (rc) {
3054 dev_err(&pdev->dev, "%s alg registration failed\n",
3055 q_alg->cipher_alg.cra_driver_name);
3056 kfree(q_alg);
3057 } else {
3058 list_add_tail(&q_alg->entry, &cp->alg_list);
3059 dev_info(&pdev->dev, "%s\n",
3060 q_alg->cipher_alg.cra_driver_name);
3061 }
3062 }
3063
3064 /*
3065 * Register crypto hash (sha1 and sha256) algorithms the
3066 * device supports
3067 */
3068 for (i = 0; i < ARRAY_SIZE(_qcrypto_ahash_algos); i++) {
3069 struct qcrypto_alg *q_alg = NULL;
3070
3071 q_alg = _qcrypto_sha_alg_alloc(cp, &_qcrypto_ahash_algos[i]);
3072
3073 if (IS_ERR(q_alg)) {
3074 rc = PTR_ERR(q_alg);
3075 goto err;
3076 }
3077
3078 rc = crypto_register_ahash(&q_alg->sha_alg);
3079 if (rc) {
3080 dev_err(&pdev->dev, "%s alg registration failed\n",
3081 q_alg->sha_alg.halg.base.cra_driver_name);
3082 kfree(q_alg);
3083 } else {
3084 list_add_tail(&q_alg->entry, &cp->alg_list);
3085 dev_info(&pdev->dev, "%s\n",
3086 q_alg->sha_alg.halg.base.cra_driver_name);
3087 }
3088 }
3089
3090 /* register crypto aead (hmac-sha1) algorithms the device supports */
3091 if (cp->ce_support.sha1_hmac_20 || cp->ce_support.sha1_hmac) {
3092 for (i = 0; i < ARRAY_SIZE(_qcrypto_aead_sha1_hmac_algos);
3093 i++) {
3094 struct qcrypto_alg *q_alg;
3095
3096 q_alg = _qcrypto_cipher_alg_alloc(cp,
3097 &_qcrypto_aead_sha1_hmac_algos[i]);
3098 if (IS_ERR(q_alg)) {
3099 rc = PTR_ERR(q_alg);
3100 goto err;
3101 }
3102
3103 rc = crypto_register_alg(&q_alg->cipher_alg);
3104 if (rc) {
3105 dev_err(&pdev->dev,
3106 "%s alg registration failed\n",
3107 q_alg->cipher_alg.cra_driver_name);
3108 kfree(q_alg);
3109 } else {
3110 list_add_tail(&q_alg->entry, &cp->alg_list);
3111 dev_info(&pdev->dev, "%s\n",
3112 q_alg->cipher_alg.cra_driver_name);
3113 }
3114 }
3115 }
3116
3117 if ((cp->ce_support.sha_hmac) || (cp->platform_support.sha_hmac)) {
3118 /* register crypto hmac algorithms the device supports */
3119 for (i = 0; i < ARRAY_SIZE(_qcrypto_sha_hmac_algos); i++) {
3120 struct qcrypto_alg *q_alg = NULL;
3121
3122 q_alg = _qcrypto_sha_alg_alloc(cp,
3123 &_qcrypto_sha_hmac_algos[i]);
3124
3125 if (IS_ERR(q_alg)) {
3126 rc = PTR_ERR(q_alg);
3127 goto err;
3128 }
3129
3130 rc = crypto_register_ahash(&q_alg->sha_alg);
3131 if (rc) {
3132 dev_err(&pdev->dev,
3133 "%s alg registration failed\n",
3134 q_alg->sha_alg.halg.base.cra_driver_name);
3135 kfree(q_alg);
3136 } else {
3137 list_add_tail(&q_alg->entry, &cp->alg_list);
3138 dev_info(&pdev->dev, "%s\n",
3139 q_alg->sha_alg.halg.base.cra_driver_name);
3140 }
3141 }
3142 }
3143 /*
3144 * Register crypto cipher (aes-ccm) algorithms the
3145 * device supports
3146 */
3147 if (cp->ce_support.aes_ccm) {
3148 struct qcrypto_alg *q_alg;
3149
3150 q_alg = _qcrypto_cipher_alg_alloc(cp, &_qcrypto_aead_ccm_algo);
3151 if (IS_ERR(q_alg)) {
3152 rc = PTR_ERR(q_alg);
3153 goto err;
3154 }
3155 rc = crypto_register_alg(&q_alg->cipher_alg);
3156 if (rc) {
3157 dev_err(&pdev->dev, "%s alg registration failed\n",
3158 q_alg->cipher_alg.cra_driver_name);
3159 kfree(q_alg);
3160 } else {
3161 list_add_tail(&q_alg->entry, &cp->alg_list);
3162 dev_info(&pdev->dev, "%s\n",
3163 q_alg->cipher_alg.cra_driver_name);
3164 }
3165 }
3166
3167 return 0;
3168err:
3169 _qcrypto_remove(pdev);
3170 return rc;
3171};
3172
/* Platform driver binding for the "qcrypto" device. */
static struct platform_driver _qualcomm_crypto = {
	.probe          = _qcrypto_probe,
	.remove         = _qcrypto_remove,
	.driver         = {
		.owner  = THIS_MODULE,
		.name   = "qcrypto",
	},
};
3181
/* Per-device index handed to each debugfs stats file as private data. */
static int _debug_qcrypto[MAX_CRYPTO_DEVICE];
3183
/* debugfs open: stash the device index (inode private data) on the file. */
static int _debug_stats_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
3189
3190static ssize_t _debug_stats_read(struct file *file, char __user *buf,
3191 size_t count, loff_t *ppos)
3192{
3193 int rc = -EINVAL;
3194 int qcrypto = *((int *) file->private_data);
3195 int len;
3196
3197 len = _disp_stats(qcrypto);
3198
3199 rc = simple_read_from_buffer((void __user *) buf, len,
3200 ppos, (void *) _debug_read_buf, len);
3201
3202 return rc;
3203}
3204
/*
 * debugfs write: any write clears this device's statistics; the written
 * data itself is ignored.
 */
static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
			size_t count, loff_t *ppos)
{

	int qcrypto = *((int *) file->private_data);

	memset((char *)&_qcrypto_stat[qcrypto], 0, sizeof(struct crypto_stat));
	return count;
};
3214
/* File operations for the per-device debugfs "stats-N" entries. */
static const struct file_operations _debug_stats_ops = {
	.open =         _debug_stats_open,
	.read =         _debug_stats_read,
	.write =        _debug_stats_write,
};
3220
3221static int _qcrypto_debug_init(void)
3222{
3223 int rc;
3224 char name[DEBUG_MAX_FNAME];
3225 int i;
3226 struct dentry *dent;
3227
3228 _debug_dent = debugfs_create_dir("qcrypto", NULL);
3229 if (IS_ERR(_debug_dent)) {
3230 pr_err("qcrypto debugfs_create_dir fail, error %ld\n",
3231 PTR_ERR(_debug_dent));
3232 return PTR_ERR(_debug_dent);
3233 }
3234
3235 for (i = 0; i < MAX_CRYPTO_DEVICE; i++) {
3236 snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", i+1);
3237 _debug_qcrypto[i] = i;
3238 dent = debugfs_create_file(name, 0644, _debug_dent,
3239 &_debug_qcrypto[i], &_debug_stats_ops);
3240 if (dent == NULL) {
3241 pr_err("qcrypto debugfs_create_file fail, error %ld\n",
3242 PTR_ERR(dent));
3243 rc = PTR_ERR(dent);
3244 goto err;
3245 }
3246 }
3247 return 0;
3248err:
3249 debugfs_remove_recursive(_debug_dent);
3250 return rc;
3251}
3252
/* Module init: set up debugfs first, then register the platform driver. */
static int __init _qcrypto_init(void)
{
	int rc;

	rc = _qcrypto_debug_init();
	if (rc)
		return rc;

	return platform_driver_register(&_qualcomm_crypto);
}
3263
/* Module exit: tear down debugfs and unregister the platform driver. */
static void __exit _qcrypto_exit(void)
{
	pr_debug("%s Unregister QCRYPTO\n", __func__);
	debugfs_remove_recursive(_debug_dent);
	platform_driver_unregister(&_qualcomm_crypto);
}
3270
3271module_init(_qcrypto_init);
3272module_exit(_qcrypto_exit);
3273
3274MODULE_LICENSE("GPL v2");
3275MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
3276MODULE_DESCRIPTION("Qualcomm Crypto driver");
Ramesh Masavarapuc1d2b682011-09-07 14:57:58 -07003277MODULE_VERSION("1.19");