/* Qualcomm Crypto driver
 *
 * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/clk.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>

#include <crypto/ctr.h>
#include <crypto/des.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include <mach/scm.h>
#include <linux/platform_data/qcom_crypto_device.h>
#include <mach/msm_bus.h>
#include "qce.h"


#define MAX_CRYPTO_DEVICE 3
#define DEBUG_MAX_FNAME  16
#define DEBUG_MAX_RW_BUF 1024

struct crypto_stat {
	u32 aead_sha1_aes_enc;
	u32 aead_sha1_aes_dec;
	u32 aead_sha1_des_enc;
	u32 aead_sha1_des_dec;
	u32 aead_sha1_3des_enc;
	u32 aead_sha1_3des_dec;
	u32 aead_op_success;
	u32 aead_op_fail;
	u32 ablk_cipher_aes_enc;
	u32 ablk_cipher_aes_dec;
	u32 ablk_cipher_des_enc;
	u32 ablk_cipher_des_dec;
	u32 ablk_cipher_3des_enc;
	u32 ablk_cipher_3des_dec;
	u32 ablk_cipher_op_success;
	u32 ablk_cipher_op_fail;
	u32 sha1_digest;
	u32 sha256_digest;
	u32 sha_op_success;
	u32 sha_op_fail;
	u32 sha1_hmac_digest;
	u32 sha256_hmac_digest;
	u32 sha_hmac_op_success;
	u32 sha_hmac_op_fail;
};
static struct crypto_stat _qcrypto_stat[MAX_CRYPTO_DEVICE];
static struct dentry *_debug_dent;
static char _debug_read_buf[DEBUG_MAX_RW_BUF];

struct crypto_priv {
	/* CE features supported by target device */
	struct msm_ce_hw_support platform_support;

	/* CE features/algorithms supported by HW engine */
	struct ce_hw_support ce_support;

	uint32_t bus_scale_handle;
	/* the lock protects queue and req */
	spinlock_t lock;

	/* qce handle */
	void *qce;

	/* list of registered algorithms */
	struct list_head alg_list;

	/* platform device */
	struct platform_device *pdev;

	/* current active request */
	struct crypto_async_request *req;
	int res;

	/* request queue */
	struct crypto_queue queue;

	uint32_t ce_lock_count;
	uint32_t high_bw_req_count;

	struct work_struct unlock_ce_ws;

	struct tasklet_struct done_tasklet;
};


/*-------------------------------------------------------------------------
* Resource Locking Service
* ------------------------------------------------------------------------*/
#define QCRYPTO_CMD_ID		1
#define QCRYPTO_CE_LOCK_CMD	1
#define QCRYPTO_CE_UNLOCK_CMD	0
#define NUM_RETRY		1000
#define CE_BUSY			55

static DEFINE_MUTEX(sent_bw_req);

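/*
 * Issue a lock/unlock command for the shared crypto engine to the
 * secure world through an SCM call.  The two-word command buffer below
 * is the layout the TZ resource-locking service expects; when the
 * kernel is built without SCM support the engine is not shared and the
 * call is a no-op.
 */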
static int qcrypto_scm_cmd(int resource, int cmd, int *response)
{
#ifdef CONFIG_MSM_SCM

	struct {
		int resource;
		int cmd;
	} cmd_buf;

	cmd_buf.resource = resource;
	cmd_buf.cmd = cmd;

	return scm_call(SCM_SVC_TZ, QCRYPTO_CMD_ID, &cmd_buf,
		sizeof(cmd_buf), response, sizeof(*response));

#else
	return 0;
#endif
}

static void qcrypto_unlock_ce(struct work_struct *work)
{
	int response = 0;
	unsigned long flags;
	struct crypto_priv *cp = container_of(work, struct crypto_priv,
							unlock_ce_ws);
	if (cp->ce_lock_count == 1)
		BUG_ON(qcrypto_scm_cmd(cp->platform_support.shared_ce_resource,
				QCRYPTO_CE_UNLOCK_CMD, &response) != 0);
	spin_lock_irqsave(&cp->lock, flags);
	cp->ce_lock_count--;
	spin_unlock_irqrestore(&cp->lock, flags);
}

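/*
 * Take the shared CE lock.  Only the first user actually issues the
 * SCM lock command; nested users just bump ce_lock_count.  If the
 * secure side reports the engine busy (CE_BUSY), the command is
 * retried up to NUM_RETRY times before giving up with -EUSERS.
 */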
static int qcrypto_lock_ce(struct crypto_priv *cp)
{
	unsigned long flags;
	int response = -CE_BUSY;
	int i = 0;

	if (cp->ce_lock_count == 0) {
		do {
			if (qcrypto_scm_cmd(
				cp->platform_support.shared_ce_resource,
				QCRYPTO_CE_LOCK_CMD, &response)) {
				response = -EINVAL;
				break;
			}
		} while ((response == -CE_BUSY) && (i++ < NUM_RETRY));

		if ((response == -CE_BUSY) && (i >= NUM_RETRY))
			return -EUSERS;
		if (response < 0)
			return -EINVAL;
	}
	spin_lock_irqsave(&cp->lock, flags);
	cp->ce_lock_count++;
	spin_unlock_irqrestore(&cp->lock, flags);


	return 0;
}

enum qcrypto_alg_type {
	QCRYPTO_ALG_CIPHER	= 0,
	QCRYPTO_ALG_SHA		= 1,
	QCRYPTO_ALG_LAST
};

struct qcrypto_alg {
	struct list_head entry;
	struct crypto_alg cipher_alg;
	struct ahash_alg sha_alg;
	enum qcrypto_alg_type alg_type;
	struct crypto_priv *cp;
};

#define QCRYPTO_MAX_KEY_SIZE	64
/* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define QCRYPTO_MAX_IV_LENGTH	16

struct qcrypto_cipher_ctx {
	u8 auth_key[QCRYPTO_MAX_KEY_SIZE];
	u8 iv[QCRYPTO_MAX_IV_LENGTH];

	u8 enc_key[QCRYPTO_MAX_KEY_SIZE];
	unsigned int enc_key_len;

	unsigned int authsize;
	unsigned int auth_key_len;

	struct crypto_priv *cp;
};

struct qcrypto_cipher_req_ctx {
	u8 *iv;
	unsigned int ivsize;
	int aead;
	struct scatterlist asg;		/* Formatted associated data sg */
	unsigned char *assoc;		/* Pointer to formatted assoc data */
	unsigned int assoclen;		/* Save unformatted assoc data length */
	struct scatterlist *assoc_sg;	/* Save unformatted assoc data sg */
	enum qce_cipher_alg_enum alg;
	enum qce_cipher_dir_enum dir;
	enum qce_cipher_mode_enum mode;

	struct scatterlist *orig_src;	/* Original src sg ptr */
	struct scatterlist *orig_dst;	/* Original dst sg ptr */
	struct scatterlist dsg;		/* Dest data sg */
	struct scatterlist ssg;		/* Source data sg */
	unsigned char *data;		/* Incoming data pointer */

};

#define SHA_MAX_BLOCK_SIZE	SHA256_BLOCK_SIZE
#define SHA_MAX_STATE_SIZE	(SHA256_DIGEST_SIZE / sizeof(u32))
#define SHA_MAX_DIGEST_SIZE	SHA256_DIGEST_SIZE

static uint8_t _std_init_vector_sha1_uint8[] = {
	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
	0xC3, 0xD2, 0xE1, 0xF0
};

/* standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint8_t _std_init_vector_sha256_uint8[] = {
	0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
	0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
	0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
};

struct qcrypto_sha_ctx {
	enum qce_hash_alg_enum alg;
	uint32_t byte_count[4];
	uint8_t digest[SHA_MAX_DIGEST_SIZE];
	uint32_t diglen;
	uint8_t *tmp_tbuf;
	uint8_t *trailing_buf;
	uint8_t *in_buf;
	uint32_t authkey_in_len;
	uint32_t trailing_buf_len;
	uint8_t first_blk;
	uint8_t last_blk;
	uint8_t authkey[SHA_MAX_BLOCK_SIZE];
	struct ahash_request *ahash_req;
	struct completion ahash_req_complete;
	struct scatterlist *sg;
	struct scatterlist tmp_sg;
	struct crypto_priv *cp;
};

struct qcrypto_sha_req_ctx {
	union {
		struct sha1_state sha1_state_ctx;
		struct sha256_state sha256_state_ctx;
	};
	struct scatterlist *src;
	uint32_t nbytes;

	struct scatterlist *orig_src;	/* Original src sg ptr */
	struct scatterlist dsg;		/* Data sg */
	unsigned char *data;		/* Incoming data pointer */
	unsigned char *data2;		/* Updated data pointer */
};

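/*
 * Convert a big-endian byte stream into an array of 32-bit words
 * (most significant byte first), handling a 1-3 byte tail; used to
 * fold intermediate digests returned by the engine back into hash
 * state words.  _words_to_byte_stream() below is the inverse.
 */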
static void _byte_stream_to_words(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned n;

	n = len / sizeof(uint32_t);
	for (; n > 0; n--) {
		*iv = ((*b << 24) & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000) |
				(((*(b+2)) << 8) & 0xff00) |
				(*(b+3) & 0xff);
		b += sizeof(uint32_t);
		iv++;
	}

	n = len % sizeof(uint32_t);
	if (n == 3) {
		*iv = ((*b << 24) & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000) |
				(((*(b+2)) << 8) & 0xff00);
	} else if (n == 2) {
		*iv = ((*b << 24) & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000);
	} else if (n == 1) {
		*iv = ((*b << 24) & 0xff000000);
	}
}

static void _words_to_byte_stream(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned n = len / sizeof(uint32_t);

	for (; n > 0; n--) {
		*b++ = (unsigned char) ((*iv >> 24) & 0xff);
		*b++ = (unsigned char) ((*iv >> 16) & 0xff);
		*b++ = (unsigned char) ((*iv >> 8) & 0xff);
		*b++ = (unsigned char) (*iv & 0xff);
		iv++;
	}
	n = len % sizeof(uint32_t);
	if (n == 3) {
		*b++ = (unsigned char) ((*iv >> 24) & 0xff);
		*b++ = (unsigned char) ((*iv >> 16) & 0xff);
		*b = (unsigned char) ((*iv >> 8) & 0xff);
	} else if (n == 2) {
		*b++ = (unsigned char) ((*iv >> 24) & 0xff);
		*b = (unsigned char) ((*iv >> 16) & 0xff);
	} else if (n == 1) {
		*b = (unsigned char) ((*iv >> 24) & 0xff);
	}
}

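/*
 * Vote for high or low bus bandwidth on behalf of this engine.  Votes
 * are reference counted under sent_bw_req: only the first high-
 * bandwidth request and the release of the last one actually reach
 * the bus scaling driver.
 */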
static void qcrypto_ce_high_bw_req(struct crypto_priv *cp, bool high_bw_req)
{
	int ret = 0;

	mutex_lock(&sent_bw_req);
	if (high_bw_req) {
		if (cp->high_bw_req_count == 0)
			ret = msm_bus_scale_client_update_request(
					cp->bus_scale_handle, 1);
		if (ret)
			pr_err("%s Unable to set to high bandwidth\n",
						__func__);
		cp->high_bw_req_count++;
	} else {
		if (cp->high_bw_req_count == 1)
			ret = msm_bus_scale_client_update_request(
					cp->bus_scale_handle, 0);
		if (ret)
			pr_err("%s Unable to set to low bandwidth\n",
						__func__);
		cp->high_bw_req_count--;
	}
	mutex_unlock(&sent_bw_req);
}

static void _start_qcrypto_process(struct crypto_priv *cp);

static struct qcrypto_alg *_qcrypto_sha_alg_alloc(struct crypto_priv *cp,
		struct ahash_alg *template)
{
	struct qcrypto_alg *q_alg;

	q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
	if (!q_alg) {
		pr_err("qcrypto Memory allocation of q_alg FAIL\n");
		return ERR_PTR(-ENOMEM);
	}

	q_alg->alg_type = QCRYPTO_ALG_SHA;
	q_alg->sha_alg = *template;
	q_alg->cp = cp;

	return q_alg;
};

static struct qcrypto_alg *_qcrypto_cipher_alg_alloc(struct crypto_priv *cp,
		struct crypto_alg *template)
{
	struct qcrypto_alg *q_alg;

	q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
	if (!q_alg) {
		pr_err("qcrypto Memory allocation of q_alg FAIL\n");
		return ERR_PTR(-ENOMEM);
	}

	q_alg->alg_type = QCRYPTO_ALG_CIPHER;
	q_alg->cipher_alg = *template;
	q_alg->cp = cp;

	return q_alg;
};

static int _qcrypto_cipher_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct qcrypto_alg *q_alg;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	q_alg = container_of(alg, struct qcrypto_alg, cipher_alg);

	/* update context with ptr to cp */
	ctx->cp = q_alg->cp;

	/* random first IV */
	get_random_bytes(ctx->iv, QCRYPTO_MAX_IV_LENGTH);
	if (ctx->cp->platform_support.bus_scale_table != NULL)
		qcrypto_ce_high_bw_req(ctx->cp, true);

	return 0;
};

static int _qcrypto_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
	struct ahash_alg *alg = container_of(crypto_hash_alg_common(ahash),
						struct ahash_alg, halg);
	struct qcrypto_alg *q_alg = container_of(alg, struct qcrypto_alg,
								sha_alg);

	crypto_ahash_set_reqsize(ahash, sizeof(struct qcrypto_sha_req_ctx));
	/* update context with ptr to cp */
	sha_ctx->cp = q_alg->cp;
	sha_ctx->sg = NULL;
	sha_ctx->tmp_tbuf = kzalloc(SHA_MAX_BLOCK_SIZE +
					SHA_MAX_DIGEST_SIZE, GFP_KERNEL);
	if (sha_ctx->tmp_tbuf == NULL) {
		pr_err("qcrypto Can't Allocate mem: sha_ctx->tmp_tbuf\n");
		return -ENOMEM;
	}

	sha_ctx->trailing_buf = kzalloc(SHA_MAX_BLOCK_SIZE, GFP_KERNEL);
	if (sha_ctx->trailing_buf == NULL) {
		kfree(sha_ctx->tmp_tbuf);
		sha_ctx->tmp_tbuf = NULL;
		pr_err("qcrypto Can't Allocate mem: sha_ctx->trailing_buf\n");
		return -ENOMEM;
	}

	sha_ctx->ahash_req = NULL;
	if (sha_ctx->cp->platform_support.bus_scale_table != NULL)
		qcrypto_ce_high_bw_req(sha_ctx->cp, true);

	return 0;
};

static void _qcrypto_ahash_cra_exit(struct crypto_tfm *tfm)
{
	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);

	kfree(sha_ctx->tmp_tbuf);
	sha_ctx->tmp_tbuf = NULL;
	kfree(sha_ctx->trailing_buf);
	sha_ctx->trailing_buf = NULL;
	if (sha_ctx->sg != NULL) {
		kfree(sha_ctx->sg);
		sha_ctx->sg = NULL;
	}
	if (sha_ctx->ahash_req != NULL) {
		ahash_request_free(sha_ctx->ahash_req);
		sha_ctx->ahash_req = NULL;
	}
	if (sha_ctx->cp->platform_support.bus_scale_table != NULL)
		qcrypto_ce_high_bw_req(sha_ctx->cp, false);
};


static void _crypto_sha_hmac_ahash_req_complete(
	struct crypto_async_request *req, int err);

static int _qcrypto_ahash_hmac_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
	int ret = 0;

	ret = _qcrypto_ahash_cra_init(tfm);
	if (ret)
		return ret;
	sha_ctx->ahash_req = ahash_request_alloc(ahash, GFP_KERNEL);

	if (sha_ctx->ahash_req == NULL) {
		_qcrypto_ahash_cra_exit(tfm);
		return -ENOMEM;
	}

	init_completion(&sha_ctx->ahash_req_complete);
	ahash_request_set_callback(sha_ctx->ahash_req,
				CRYPTO_TFM_REQ_MAY_BACKLOG,
				_crypto_sha_hmac_ahash_req_complete,
				&sha_ctx->ahash_req_complete);
	crypto_ahash_clear_flags(ahash, ~0);

	return 0;
};

static int _qcrypto_cra_ablkcipher_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct qcrypto_cipher_req_ctx);
	return _qcrypto_cipher_cra_init(tfm);
};

static int _qcrypto_cra_aead_init(struct crypto_tfm *tfm)
{
	tfm->crt_aead.reqsize = sizeof(struct qcrypto_cipher_req_ctx);
	return _qcrypto_cipher_cra_init(tfm);
};

static void _qcrypto_cra_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->cp->platform_support.bus_scale_table != NULL)
		qcrypto_ce_high_bw_req(ctx->cp, false);
};

static void _qcrypto_cra_aead_exit(struct crypto_tfm *tfm)
{
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->cp->platform_support.bus_scale_table != NULL)
		qcrypto_ce_high_bw_req(ctx->cp, false);
};

static int _disp_stats(int id)
{
	struct crypto_stat *pstat;
	int len = 0;

	pstat = &_qcrypto_stat[id];
	len = snprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
			"\nQualcomm crypto accelerator %d Statistics:\n",
				id + 1);

	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   ABLK AES CIPHER encryption   : %d\n",
					pstat->ablk_cipher_aes_enc);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   ABLK AES CIPHER decryption   : %d\n",
					pstat->ablk_cipher_aes_dec);

	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   ABLK DES CIPHER encryption   : %d\n",
					pstat->ablk_cipher_des_enc);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   ABLK DES CIPHER decryption   : %d\n",
					pstat->ablk_cipher_des_dec);

	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   ABLK 3DES CIPHER encryption  : %d\n",
					pstat->ablk_cipher_3des_enc);

	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   ABLK 3DES CIPHER decryption  : %d\n",
					pstat->ablk_cipher_3des_dec);

	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   ABLK CIPHER operation success: %d\n",
					pstat->ablk_cipher_op_success);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   ABLK CIPHER operation fail   : %d\n",
					pstat->ablk_cipher_op_fail);

	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   AEAD SHA1-AES encryption     : %d\n",
					pstat->aead_sha1_aes_enc);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   AEAD SHA1-AES decryption     : %d\n",
					pstat->aead_sha1_aes_dec);

	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   AEAD SHA1-DES encryption     : %d\n",
					pstat->aead_sha1_des_enc);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   AEAD SHA1-DES decryption     : %d\n",
					pstat->aead_sha1_des_dec);

	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   AEAD SHA1-3DES encryption    : %d\n",
					pstat->aead_sha1_3des_enc);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   AEAD SHA1-3DES decryption    : %d\n",
					pstat->aead_sha1_3des_dec);

	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   AEAD operation success       : %d\n",
					pstat->aead_op_success);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   AEAD operation fail          : %d\n",
					pstat->aead_op_fail);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   SHA1 digest                  : %d\n",
					pstat->sha1_digest);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   SHA256 digest                : %d\n",
					pstat->sha256_digest);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   SHA operation fail           : %d\n",
					pstat->sha_op_fail);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   SHA operation success        : %d\n",
					pstat->sha_op_success);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   SHA1 HMAC digest             : %d\n",
					pstat->sha1_hmac_digest);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   SHA256 HMAC digest           : %d\n",
					pstat->sha256_hmac_digest);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   SHA HMAC operation fail      : %d\n",
					pstat->sha_hmac_op_fail);
	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
			"   SHA HMAC operation success   : %d\n",
					pstat->sha_hmac_op_success);
	return len;
}

static int _qcrypto_remove(struct platform_device *pdev)
{
	struct crypto_priv *cp;
	struct qcrypto_alg *q_alg;
	struct qcrypto_alg *n;

	cp = platform_get_drvdata(pdev);

	if (!cp)
		return 0;

	if (cp->platform_support.bus_scale_table != NULL)
		msm_bus_scale_unregister_client(cp->bus_scale_handle);

	list_for_each_entry_safe(q_alg, n, &cp->alg_list, entry) {
		if (q_alg->alg_type == QCRYPTO_ALG_CIPHER)
			crypto_unregister_alg(&q_alg->cipher_alg);
		if (q_alg->alg_type == QCRYPTO_ALG_SHA)
			crypto_unregister_ahash(&q_alg->sha_alg);
		list_del(&q_alg->entry);
		kfree(q_alg);
	}

	if (cp->qce)
		qce_close(cp->qce);
	tasklet_kill(&cp->done_tasklet);
	kfree(cp);
	return 0;
};

static int _qcrypto_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
		unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_priv *cp = ctx->cp;

	switch (len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_256:
		break;
	case AES_KEYSIZE_192:
		if (cp->ce_support.aes_key_192)
			break;
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	};
	ctx->enc_key_len = len;
	memcpy(ctx->enc_key, key, len);
	return 0;
};

static int _qcrypto_setkey_des(struct crypto_ablkcipher *cipher, const u8 *key,
		unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 tmp[DES_EXPKEY_WORDS];
	int ret = des_ekey(tmp, key);

	if (len != DES_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	};

	if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	ctx->enc_key_len = len;
	memcpy(ctx->enc_key, key, len);
	return 0;
};

static int _qcrypto_setkey_3des(struct crypto_ablkcipher *cipher, const u8 *key,
		unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	if (len != DES3_EDE_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	};
	ctx->enc_key_len = len;
	memcpy(ctx->enc_key, key, len);
	return 0;
};

static void req_done(unsigned long data)
{
	struct crypto_async_request *areq;
	struct crypto_priv *cp = (struct crypto_priv *)data;
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	areq = cp->req;
	cp->req = NULL;
	spin_unlock_irqrestore(&cp->lock, flags);

	if (areq)
		areq->complete(areq, cp->res);
	_start_qcrypto_process(cp);
};

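/*
 * Refresh the exported sha1_state after a hardware pass: on a final
 * block the state is cleared, otherwise the trailing (unprocessed)
 * bytes are kept in the block buffer and the intermediate digest is
 * folded back into the state words so the hash can be resumed.
 * _update_sha256_ctx() below does the same for SHA-256.
 */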
static void _update_sha1_ctx(struct ahash_request *req)
{
	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx;
	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);

	if (sha_ctx->last_blk == 1)
		memset(sha_state_ctx, 0x00, sizeof(struct sha1_state));
	else {
		memset(sha_state_ctx->buffer, 0x00, SHA1_BLOCK_SIZE);
		memcpy(sha_state_ctx->buffer, sha_ctx->trailing_buf,
						sha_ctx->trailing_buf_len);
		_byte_stream_to_words(sha_state_ctx->state, sha_ctx->digest,
					SHA1_DIGEST_SIZE);
	}
	return;
}

static void _update_sha256_ctx(struct ahash_request *req)
{
	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx;
	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);

	if (sha_ctx->last_blk == 1)
		memset(sha_state_ctx, 0x00, sizeof(struct sha256_state));
	else {
		memset(sha_state_ctx->buf, 0x00, SHA256_BLOCK_SIZE);
		memcpy(sha_state_ctx->buf, sha_ctx->trailing_buf,
						sha_ctx->trailing_buf_len);
		_byte_stream_to_words(sha_state_ctx->state, sha_ctx->digest,
					SHA256_DIGEST_SIZE);
	}
	return;
}

static void _qce_ahash_complete(void *cookie, unsigned char *digest,
		unsigned char *authdata, int ret)
{
	struct ahash_request *areq = (struct ahash_request *) cookie;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(areq->base.tfm);
	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(areq);
	struct crypto_priv *cp = sha_ctx->cp;
	struct crypto_stat *pstat;
	uint32_t diglen = crypto_ahash_digestsize(ahash);
	uint32_t *auth32 = (uint32_t *)authdata;

	pstat = &_qcrypto_stat[cp->pdev->id];

#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qce_ahash_complete: %p ret %d\n",
				areq, ret);
#endif
	if (digest) {
		memcpy(sha_ctx->digest, digest, diglen);
		memcpy(areq->result, digest, diglen);
	}
	if (authdata) {
		sha_ctx->byte_count[0] = auth32[0];
		sha_ctx->byte_count[1] = auth32[1];
		sha_ctx->byte_count[2] = auth32[2];
		sha_ctx->byte_count[3] = auth32[3];
	}
	areq->src = rctx->src;
	areq->nbytes = rctx->nbytes;

	if (sha_ctx->sg != NULL) {
		kfree(sha_ctx->sg);
		sha_ctx->sg = NULL;
	}

	if (sha_ctx->alg == QCE_HASH_SHA1)
		_update_sha1_ctx(areq);
	if (sha_ctx->alg == QCE_HASH_SHA256)
		_update_sha256_ctx(areq);

	sha_ctx->last_blk = 0;
	sha_ctx->first_blk = 0;

	if (ret) {
		cp->res = -ENXIO;
		pstat->sha_op_fail++;
	} else {
		cp->res = 0;
		pstat->sha_op_success++;
	}
	if (cp->ce_support.aligned_only) {
		areq->src = rctx->orig_src;
		kfree(rctx->data);
	}

	if (cp->platform_support.ce_shared)
		schedule_work(&cp->unlock_ce_ws);
	tasklet_schedule(&cp->done_tasklet);
};

static void _qce_ablk_cipher_complete(void *cookie, unsigned char *icb,
		unsigned char *iv, int ret)
{
	struct ablkcipher_request *areq = (struct ablkcipher_request *) cookie;
	struct crypto_ablkcipher *ablk = crypto_ablkcipher_reqtfm(areq);
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qce_ablk_cipher_complete: %p ret %d\n",
				areq, ret);
#endif
	if (iv)
		memcpy(ctx->iv, iv, crypto_ablkcipher_ivsize(ablk));

	if (ret) {
		cp->res = -ENXIO;
		pstat->ablk_cipher_op_fail++;
	} else {
		cp->res = 0;
		pstat->ablk_cipher_op_success++;
	}

	if (cp->ce_support.aligned_only) {
		struct qcrypto_cipher_req_ctx *rctx;
		struct scatterlist *sg;
		uint32_t bytes = 0;

		rctx = ablkcipher_request_ctx(areq);
		areq->src = rctx->orig_src;
		areq->dst = rctx->orig_dst;

		for (sg = areq->dst; bytes != areq->nbytes; sg++) {
			memcpy(sg_virt(sg), ((char *)rctx->data + bytes),
							sg->length);
			bytes += sg->length;
		}
		kfree(rctx->data);
	}

	if (cp->platform_support.ce_shared)
		schedule_work(&cp->unlock_ce_ws);
	tasklet_schedule(&cp->done_tasklet);
};


static void _qce_aead_complete(void *cookie, unsigned char *icv,
				unsigned char *iv, int ret)
{
	struct aead_request *areq = (struct aead_request *) cookie;
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct qcrypto_cipher_req_ctx *rctx;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(areq);

	if (rctx->mode == QCE_MODE_CCM) {
		if (cp->ce_support.aligned_only) {
			struct scatterlist *sg;
			uint32_t bytes = 0;
			uint32_t nbytes = 0;

			areq->src = rctx->orig_src;
			areq->dst = rctx->orig_dst;
			if (rctx->dir == QCE_ENCRYPT)
				nbytes = areq->cryptlen +
						crypto_aead_authsize(aead);
			else
				nbytes = areq->cryptlen -
						crypto_aead_authsize(aead);

			for (sg = areq->dst; bytes != nbytes; sg++) {
				memcpy(sg_virt(sg),
				((char *)rctx->data + rctx->assoclen + bytes),
								sg->length);
				bytes += sg->length;
			}
			kfree(rctx->data);
		}
		kzfree(rctx->assoc);
		areq->assoc = rctx->assoc_sg;
		areq->assoclen = rctx->assoclen;
		if (ret) {
			if (ret == 0x2000000)
				ret = -EBADMSG;
			else
				ret = -ENXIO;
		}
	} else {
		if (ret == 0) {
			if (rctx->dir == QCE_ENCRYPT) {
				/* copy the icv to dst */
				scatterwalk_map_and_copy(icv, areq->dst,
						areq->cryptlen,
						ctx->authsize, 1);

			} else {
				unsigned char tmp[SHA256_DIGESTSIZE] = {0};

				/* compare icv from src */
				scatterwalk_map_and_copy(tmp,
					areq->src, areq->cryptlen -
					ctx->authsize, ctx->authsize, 0);
				ret = memcmp(icv, tmp, ctx->authsize);
				if (ret != 0)
					ret = -EBADMSG;

			}
		} else {
			ret = -ENXIO;
		}

		if (iv)
			memcpy(ctx->iv, iv, crypto_aead_ivsize(aead));
	}

	if (ret)
		pstat->aead_op_fail++;
	else
		pstat->aead_op_success++;

	if (cp->platform_support.ce_shared)
		schedule_work(&cp->unlock_ce_ws);
	tasklet_schedule(&cp->done_tasklet);
}

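/*
 * Encode the message length into the last 'csize' bytes of the CCM B0
 * block, as required by NIST SP 800-38C: the length field is a
 * big-endian integer occupying the bytes not used by the nonce.
 */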
static int aead_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}

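/*
 * Build the CCM nonce/flags byte from the request IV.  Per RFC 3610,
 * bits 3-5 of the flags octet encode (authsize - 2) / 2, bit 6 is set
 * when associated data is present, and iv[0] holds L' = L - 1, where
 * L is the size of the length field that aead_ccm_set_msg_len() fills.
 */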
static int qccrypto_set_aead_ccm_nonce(struct qce_req *qreq)
{
	struct aead_request *areq = (struct aead_request *) qreq->areq;
	unsigned int i = ((unsigned int)qreq->iv[0]) + 1;

	memcpy(&qreq->nonce[0], qreq->iv, qreq->ivsize);
	/*
	 * Format control info per RFC 3610 and
	 * NIST Special Publication 800-38C
	 */
	qreq->nonce[0] |= (8 * ((qreq->authsize - 2) / 2));
	if (areq->assoclen)
		qreq->nonce[0] |= 64;

	if (i > MAX_NONCE)
		return -EINVAL;

	return aead_ccm_set_msg_len(qreq->nonce + 16 - i, qreq->cryptlen, i);
}

static int qcrypto_aead_ccm_format_adata(struct qce_req *qreq, uint32_t alen,
						struct scatterlist *sg)
{
	unsigned char *adata;
	uint32_t len, l;

	qreq->assoc = kzalloc((alen + 0x64), (GFP_KERNEL | __GFP_DMA));
	if (!qreq->assoc) {
		pr_err("qcrypto Memory allocation of adata FAIL\n");
		return -ENOMEM;
	}
	adata = qreq->assoc;
	/*
	 * Add control info for associated data
	 * RFC 3610 and NIST Special Publication 800-38C
	 */
	if (alen < 65280) {
		*(__be16 *)adata = cpu_to_be16(alen);
		len = 2;
	} else {
		if ((alen >= 65280) && (alen <= 0xffffffff)) {
			*(__be16 *)adata = cpu_to_be16(0xfffe);
			*(__be32 *)&adata[2] = cpu_to_be32(alen);
			len = 6;
		} else {
			*(__be16 *)adata = cpu_to_be16(0xffff);
			*(__be32 *)&adata[6] = cpu_to_be32(alen);
			len = 10;
		}
	}
	adata += len;
	qreq->assoclen = ALIGN((alen + len), 16);
	for (l = alen; l > 0; sg = sg_next(sg)) {
		memcpy(adata, sg_virt(sg), sg->length);
		l -= sg->length;
		adata += sg->length;
	}
	return 0;
}

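/*
 * Some engine revisions (ce_support.aligned_only) cannot DMA from
 * arbitrary scatterlists, so the request data is flattened into a
 * contiguous bounce buffer and a single-entry sg is substituted for
 * src/dst; the completion callback copies the result back and
 * restores the caller's scatterlists.
 */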
static int _qcrypto_process_ablkcipher(struct crypto_priv *cp,
			struct crypto_async_request *async_req)
{
	struct qce_req qreq;
	int ret;
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *cipher_ctx;
	struct ablkcipher_request *req;
	struct crypto_ablkcipher *tfm;

	req = container_of(async_req, struct ablkcipher_request, base);
	cipher_ctx = crypto_tfm_ctx(async_req->tfm);
	rctx = ablkcipher_request_ctx(req);
	tfm = crypto_ablkcipher_reqtfm(req);
	if (cp->ce_support.aligned_only) {
		uint32_t bytes = 0;
		struct scatterlist *sg = req->src;

		rctx->orig_src = req->src;
		rctx->orig_dst = req->dst;
		rctx->data = kzalloc((req->nbytes + 64), GFP_KERNEL);
		if (rctx->data == NULL)
			return -ENOMEM;
		for (sg = req->src; bytes != req->nbytes; sg++) {
			memcpy(((char *)rctx->data + bytes),
					sg_virt(sg), sg->length);
			bytes += sg->length;
		}
		sg_set_buf(&rctx->dsg, rctx->data, req->nbytes);
		sg_mark_end(&rctx->dsg);
		rctx->iv = req->info;

		req->src = &rctx->dsg;
		req->dst = &rctx->dsg;

	}
	qreq.op = QCE_REQ_ABLK_CIPHER;
	qreq.qce_cb = _qce_ablk_cipher_complete;
	qreq.areq = req;
	qreq.alg = rctx->alg;
	qreq.dir = rctx->dir;
	qreq.mode = rctx->mode;
	qreq.enckey = cipher_ctx->enc_key;
	qreq.encklen = cipher_ctx->enc_key_len;
	qreq.iv = req->info;
	qreq.ivsize = crypto_ablkcipher_ivsize(tfm);
	qreq.cryptlen = req->nbytes;
	qreq.use_pmem = 0;

	if ((cipher_ctx->enc_key_len == 0) &&
			(cp->platform_support.hw_key_support == 0))
		ret = -EINVAL;
	else
		ret = qce_ablk_cipher_req(cp->qce, &qreq);

	return ret;
}

static int _qcrypto_process_ahash(struct crypto_priv *cp,
			struct crypto_async_request *async_req)
{
	struct ahash_request *req;
	struct qce_sha_req sreq;
	struct qcrypto_sha_ctx *sha_ctx;
	int ret = 0;

	req = container_of(async_req,
				struct ahash_request, base);
	sha_ctx = crypto_tfm_ctx(async_req->tfm);

	sreq.qce_cb = _qce_ahash_complete;
	sreq.digest = &sha_ctx->digest[0];
	sreq.src = req->src;
	sreq.auth_data[0] = sha_ctx->byte_count[0];
	sreq.auth_data[1] = sha_ctx->byte_count[1];
	sreq.auth_data[2] = sha_ctx->byte_count[2];
	sreq.auth_data[3] = sha_ctx->byte_count[3];
	sreq.first_blk = sha_ctx->first_blk;
	sreq.last_blk = sha_ctx->last_blk;
	sreq.size = req->nbytes;
	sreq.areq = req;

	switch (sha_ctx->alg) {
	case QCE_HASH_SHA1:
		sreq.alg = QCE_HASH_SHA1;
		sreq.authkey = NULL;
		break;
	case QCE_HASH_SHA256:
		sreq.alg = QCE_HASH_SHA256;
		sreq.authkey = NULL;
		break;
	case QCE_HASH_SHA1_HMAC:
		sreq.alg = QCE_HASH_SHA1_HMAC;
		sreq.authkey = &sha_ctx->authkey[0];
		sreq.authklen = SHA_HMAC_KEY_SIZE;
		break;
	case QCE_HASH_SHA256_HMAC:
		sreq.alg = QCE_HASH_SHA256_HMAC;
		sreq.authkey = &sha_ctx->authkey[0];
		sreq.authklen = SHA_HMAC_KEY_SIZE;
		break;
	default:
		break;
	};
	ret = qce_process_sha_req(cp->qce, &sreq);

	return ret;
}

static int _qcrypto_process_aead(struct crypto_priv *cp,
			struct crypto_async_request *async_req)
{
	struct qce_req qreq;
	int ret = 0;
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *cipher_ctx;
	struct aead_request *req = container_of(async_req,
				struct aead_request, base);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);

	rctx = aead_request_ctx(req);
	cipher_ctx = crypto_tfm_ctx(async_req->tfm);

	qreq.op = QCE_REQ_AEAD;
	qreq.qce_cb = _qce_aead_complete;

	qreq.areq = req;
	qreq.alg = rctx->alg;
	qreq.dir = rctx->dir;
	qreq.mode = rctx->mode;
	qreq.iv = rctx->iv;

	qreq.enckey = cipher_ctx->enc_key;
	qreq.encklen = cipher_ctx->enc_key_len;
	qreq.authkey = cipher_ctx->auth_key;
	qreq.authklen = cipher_ctx->auth_key_len;
	qreq.authsize = crypto_aead_authsize(aead);
	qreq.ivsize = crypto_aead_ivsize(aead);
	if (qreq.mode == QCE_MODE_CCM) {
		if (qreq.dir == QCE_ENCRYPT)
			qreq.cryptlen = req->cryptlen;
		else
			qreq.cryptlen = req->cryptlen -
						qreq.authsize;
		/* Get NONCE */
		ret = qccrypto_set_aead_ccm_nonce(&qreq);
		if (ret)
			return ret;

		/* Format Associated data */
		ret = qcrypto_aead_ccm_format_adata(&qreq,
						req->assoclen,
						req->assoc);
		if (ret)
			return ret;

		if (cp->ce_support.aligned_only) {
			uint32_t bytes = 0;
			struct scatterlist *sg = req->src;

			rctx->orig_src = req->src;
			rctx->orig_dst = req->dst;
			rctx->data = kzalloc((req->cryptlen + qreq.assoclen +
					qreq.authsize + 64*2), GFP_KERNEL);
			if (rctx->data == NULL) {
				kzfree(qreq.assoc);
				return -ENOMEM;
			}

			memcpy((char *)rctx->data, qreq.assoc, qreq.assoclen);

			for (sg = req->src; bytes != req->cryptlen; sg++) {
				memcpy((rctx->data + bytes + qreq.assoclen),
						sg_virt(sg), sg->length);
				bytes += sg->length;
			}
			sg_set_buf(&rctx->ssg, rctx->data, req->cryptlen +
							qreq.assoclen);
			sg_mark_end(&rctx->ssg);

			if (qreq.dir == QCE_ENCRYPT)
				sg_set_buf(&rctx->dsg, rctx->data,
					qreq.assoclen + qreq.cryptlen +
					ALIGN(qreq.authsize, 64));
			else
				sg_set_buf(&rctx->dsg, rctx->data,
						qreq.assoclen + req->cryptlen +
						qreq.authsize);
			sg_mark_end(&rctx->dsg);

			req->src = &rctx->ssg;
			req->dst = &rctx->dsg;
		}
		/*
		 * Save the original associated data
		 * length and sg
		 */
		rctx->assoc_sg = req->assoc;
		rctx->assoclen = req->assoclen;
		rctx->assoc = qreq.assoc;
		/*
		 * update req with new formatted associated
		 * data info
		 */
		req->assoc = &rctx->asg;
		req->assoclen = qreq.assoclen;
		sg_set_buf(req->assoc, qreq.assoc,
					req->assoclen);
		sg_mark_end(req->assoc);
	}
	ret = qce_aead_req(cp->qce, &qreq);

	return ret;
}

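/*
 * Pull the next request off the queue and hand it to the engine.  Only
 * one request is active at a time (cp->req); if submission fails, the
 * request is completed with the error and the next one is tried.
 */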
static void _start_qcrypto_process(struct crypto_priv *cp)
{
	struct crypto_async_request *async_req = NULL;
	struct crypto_async_request *backlog = NULL;
	unsigned long flags;
	u32 type;
	int ret;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

again:
	spin_lock_irqsave(&cp->lock, flags);
	if (cp->req == NULL) {
		backlog = crypto_get_backlog(&cp->queue);
		async_req = crypto_dequeue_request(&cp->queue);
		cp->req = async_req;
	}
	spin_unlock_irqrestore(&cp->lock, flags);
	if (!async_req)
		return;
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	type = crypto_tfm_alg_type(async_req->tfm);

	switch (type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		ret = _qcrypto_process_ablkcipher(cp, async_req);
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		ret = _qcrypto_process_ahash(cp, async_req);
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		ret = _qcrypto_process_aead(cp, async_req);
		break;
	default:
		ret = -EINVAL;
	};

	if (ret) {
		spin_lock_irqsave(&cp->lock, flags);
		cp->req = NULL;
		spin_unlock_irqrestore(&cp->lock, flags);

		if (type == CRYPTO_ALG_TYPE_ABLKCIPHER)
			pstat->ablk_cipher_op_fail++;
		else
			if (type == CRYPTO_ALG_TYPE_AHASH)
				pstat->sha_op_fail++;
			else
				pstat->aead_op_fail++;

		async_req->complete(async_req, ret);
		goto again;
	};
};

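/*
 * Common entry point for all transforms: take the shared CE lock when
 * the engine is shared with another execution environment, enqueue
 * the request, and kick the dispatcher.
 */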
static int _qcrypto_queue_req(struct crypto_priv *cp,
				struct crypto_async_request *req)
{
	int ret;
	unsigned long flags;

	if (cp->platform_support.ce_shared) {
		ret = qcrypto_lock_ce(cp);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&cp->lock, flags);
	ret = crypto_enqueue_request(&cp->queue, req);
	spin_unlock_irqrestore(&cp->lock, flags);
	_start_qcrypto_process(cp);

	return ret;
}

static int _qcrypto_enc_aes_ecb(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qcrypto_enc_aes_ecb: %p\n", req);
#endif
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_ECB;

	pstat->ablk_cipher_aes_enc++;
	return _qcrypto_queue_req(cp, &req->base);
};

static int _qcrypto_enc_aes_cbc(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qcrypto_enc_aes_cbc: %p\n", req);
#endif
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CBC;

	pstat->ablk_cipher_aes_enc++;
	return _qcrypto_queue_req(cp, &req->base);
};

static int _qcrypto_enc_aes_ctr(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qcrypto_enc_aes_ctr: %p\n", req);
#endif
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CTR;

	pstat->ablk_cipher_aes_enc++;
	return _qcrypto_queue_req(cp, &req->base);
};

static int _qcrypto_enc_aes_xts(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_XTS;

	pstat->ablk_cipher_aes_enc++;
	return _qcrypto_queue_req(cp, &req->base);
};

static int _qcrypto_aead_encrypt_aes_ccm(struct aead_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	if ((ctx->authsize > 16) || (ctx->authsize < 4) || (ctx->authsize & 1))
		return -EINVAL;
	if ((ctx->auth_key_len != AES_KEYSIZE_128) &&
		(ctx->auth_key_len != AES_KEYSIZE_256))
		return -EINVAL;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(req);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CCM;
	rctx->iv = req->iv;

	pstat->aead_sha1_aes_enc++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_enc_des_ecb(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_DES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_ECB;

	pstat->ablk_cipher_des_enc++;
	return _qcrypto_queue_req(cp, &req->base);
};

static int _qcrypto_enc_des_cbc(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_DES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CBC;

	pstat->ablk_cipher_des_enc++;
	return _qcrypto_queue_req(cp, &req->base);
};

static int _qcrypto_enc_3des_ecb(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_3DES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_ECB;

	pstat->ablk_cipher_3des_enc++;
	return _qcrypto_queue_req(cp, &req->base);
};

static int _qcrypto_enc_3des_cbc(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_3DES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CBC;

	pstat->ablk_cipher_3des_enc++;
	return _qcrypto_queue_req(cp, &req->base);
};

static int _qcrypto_dec_aes_ecb(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qcrypto_dec_aes_ecb: %p\n", req);
#endif
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_DECRYPT;
	rctx->mode = QCE_MODE_ECB;

	pstat->ablk_cipher_aes_dec++;
	return _qcrypto_queue_req(cp, &req->base);
};

static int _qcrypto_dec_aes_cbc(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qcrypto_dec_aes_cbc: %p\n", req);
#endif

	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_DECRYPT;
	rctx->mode = QCE_MODE_CBC;

	pstat->ablk_cipher_aes_dec++;
	return _qcrypto_queue_req(cp, &req->base);
};

static int _qcrypto_dec_aes_ctr(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qcrypto_dec_aes_ctr: %p\n", req);
#endif
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_AES;
	rctx->mode = QCE_MODE_CTR;

	/* Note. There is no such thing as aes/counter mode, decrypt */
	rctx->dir = QCE_ENCRYPT;

	pstat->ablk_cipher_aes_dec++;
	return _qcrypto_queue_req(cp, &req->base);
};

static int _qcrypto_dec_des_ecb(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_DES;
	rctx->dir = QCE_DECRYPT;
	rctx->mode = QCE_MODE_ECB;

	pstat->ablk_cipher_des_dec++;
	return _qcrypto_queue_req(cp, &req->base);
};

static int _qcrypto_dec_des_cbc(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_DES;
	rctx->dir = QCE_DECRYPT;
	rctx->mode = QCE_MODE_CBC;

	pstat->ablk_cipher_des_dec++;
	return _qcrypto_queue_req(cp, &req->base);
};

static int _qcrypto_dec_3des_ecb(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_3DES;
	rctx->dir = QCE_DECRYPT;
	rctx->mode = QCE_MODE_ECB;

	pstat->ablk_cipher_3des_dec++;
	return _qcrypto_queue_req(cp, &req->base);
};

static int _qcrypto_dec_3des_cbc(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_3DES;
	rctx->dir = QCE_DECRYPT;
	rctx->mode = QCE_MODE_CBC;

	pstat->ablk_cipher_3des_dec++;
	return _qcrypto_queue_req(cp, &req->base);
};

static int _qcrypto_dec_aes_xts(struct ablkcipher_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

	BUG_ON(crypto_tfm_alg_type(req->base.tfm) !=
					CRYPTO_ALG_TYPE_ABLKCIPHER);
	rctx = ablkcipher_request_ctx(req);
	rctx->aead = 0;
	rctx->alg = CIPHER_ALG_AES;
	rctx->mode = QCE_MODE_XTS;
	rctx->dir = QCE_DECRYPT;

	pstat->ablk_cipher_aes_dec++;
	return _qcrypto_queue_req(cp, &req->base);
};


static int _qcrypto_aead_decrypt_aes_ccm(struct aead_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	if ((ctx->authsize > 16) || (ctx->authsize < 4) || (ctx->authsize & 1))
		return -EINVAL;
	if ((ctx->auth_key_len != AES_KEYSIZE_128) &&
		(ctx->auth_key_len != AES_KEYSIZE_256))
		return -EINVAL;

	pstat = &_qcrypto_stat[cp->pdev->id];

	rctx = aead_request_ctx(req);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_DECRYPT;
	rctx->mode = QCE_MODE_CCM;
	rctx->iv = req->iv;

	pstat->aead_sha1_aes_dec++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_aead_setauthsize(struct crypto_aead *authenc,
				unsigned int authsize)
{
	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	return 0;
}

static int _qcrypto_aead_ccm_setauthsize(struct crypto_aead *authenc,
				  unsigned int authsize)
{
	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);

	switch (authsize) {
	case 4:
	case 6:
	case 8:
	case 10:
	case 12:
	case 14:
	case 16:
		break;
	default:
		return -EINVAL;
	}
	ctx->authsize = authsize;
	return 0;
}

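/*
 * authenc() key blobs arrive rtattr-encoded: a crypto_authenc_key_param
 * giving the encryption key length, followed by the authentication key
 * and then the encryption key.  Split the blob into ctx->auth_key and
 * ctx->enc_key.
 */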
static int _qcrypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			unsigned int keylen)
{
	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
	struct rtattr *rta = (struct rtattr *)key;
	struct crypto_authenc_key_param *param;

	if (!RTA_OK(rta, keylen))
		goto badkey;
	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;
	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	ctx->enc_key_len = be32_to_cpu(param->enckeylen);

	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < ctx->enc_key_len)
		goto badkey;

	ctx->auth_key_len = keylen - ctx->enc_key_len;
	if (ctx->enc_key_len >= QCRYPTO_MAX_KEY_SIZE ||
				ctx->auth_key_len >= QCRYPTO_MAX_KEY_SIZE)
		goto badkey;
	memset(ctx->auth_key, 0, QCRYPTO_MAX_KEY_SIZE);
	memcpy(ctx->enc_key, key + ctx->auth_key_len, ctx->enc_key_len);
	memcpy(ctx->auth_key, key, ctx->auth_key_len);

	return 0;
badkey:
	ctx->enc_key_len = 0;
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int _qcrypto_aead_ccm_setkey(struct crypto_aead *aead, const u8 *key,
			unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_priv *cp = ctx->cp;

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_256:
		break;
	case AES_KEYSIZE_192:
		if (cp->ce_support.aes_key_192)
			break;
	default:
		ctx->enc_key_len = 0;
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	};
	ctx->enc_key_len = keylen;
	memcpy(ctx->enc_key, key, keylen);
	ctx->auth_key_len = keylen;
	memcpy(ctx->auth_key, key, keylen);

	return 0;
}

static int _qcrypto_aead_encrypt_aes_cbc(struct aead_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qcrypto_aead_encrypt_aes_cbc: %p\n", req);
#endif

	rctx = aead_request_ctx(req);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_ENCRYPT;
	rctx->mode = QCE_MODE_CBC;
	rctx->iv = req->iv;

	pstat->aead_sha1_aes_enc++;
	return _qcrypto_queue_req(cp, &req->base);
}

static int _qcrypto_aead_decrypt_aes_cbc(struct aead_request *req)
{
	struct qcrypto_cipher_req_ctx *rctx;
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
	struct crypto_stat *pstat;

	pstat = &_qcrypto_stat[cp->pdev->id];

#ifdef QCRYPTO_DEBUG
	dev_info(&cp->pdev->dev, "_qcrypto_aead_decrypt_aes_cbc: %p\n", req);
#endif
	rctx = aead_request_ctx(req);
	rctx->aead = 1;
	rctx->alg = CIPHER_ALG_AES;
	rctx->dir = QCE_DECRYPT;
	rctx->mode = QCE_MODE_CBC;
	rctx->iv = req->iv;

	pstat->aead_sha1_aes_dec++;
	return _qcrypto_queue_req(cp, &req->base);
}

1892static int _qcrypto_aead_givencrypt_aes_cbc(struct aead_givcrypt_request *req)
1893{
1894 struct aead_request *areq = &req->areq;
1895 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1896 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
1897 struct crypto_priv *cp = ctx->cp;
1898 struct qcrypto_cipher_req_ctx *rctx;
1899 struct crypto_stat *pstat;
1900
1901 pstat = &_qcrypto_stat[cp->pdev->id];
1902
1903 rctx = aead_request_ctx(areq);
1904 rctx->aead = 1;
1905 rctx->alg = CIPHER_ALG_AES;
1906 rctx->dir = QCE_ENCRYPT;
1907 rctx->mode = QCE_MODE_CBC;
1908 rctx->iv = req->giv; /* generated iv */
1909
1910 memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
1911 /* avoid consecutive packets going out with same IV */
1912 *(__be64 *)req->giv ^= cpu_to_be64(req->seq);
1913 pstat->aead_sha1_aes_enc++;
1914 return _qcrypto_queue_req(cp, &areq->base);
1915}
1916
1917#ifdef QCRYPTO_AEAD_AES_CTR
1918static int _qcrypto_aead_encrypt_aes_ctr(struct aead_request *req)
1919{
1920 struct qcrypto_cipher_req_ctx *rctx;
1921 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1922 struct crypto_priv *cp = ctx->cp;
1923 struct crypto_stat *pstat;
1924
1925 pstat = &_qcrypto_stat[cp->pdev->id];
1926
1927 rctx = aead_request_ctx(req);
1928 rctx->aead = 1;
1929 rctx->alg = CIPHER_ALG_AES;
1930 rctx->dir = QCE_ENCRYPT;
1931 rctx->mode = QCE_MODE_CTR;
1932 rctx->iv = req->iv;
1933
1934 pstat->aead_sha1_aes_enc++;
1935 return _qcrypto_queue_req(cp, &req->base);
1936}
1937
1938static int _qcrypto_aead_decrypt_aes_ctr(struct aead_request *req)
1939{
1940 struct qcrypto_cipher_req_ctx *rctx;
1941 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1942 struct crypto_priv *cp = ctx->cp;
1943 struct crypto_stat *pstat;
1944
1945 pstat = &_qcrypto_stat[cp->pdev->id];
1946
1947 rctx = aead_request_ctx(req);
1948 rctx->aead = 1;
1949 rctx->alg = CIPHER_ALG_AES;
1950
1951	/* CTR decryption is the same keystream operation as encryption, so run the engine in the encrypt direction */
1952 rctx->dir = QCE_ENCRYPT;
1953
1954 rctx->mode = QCE_MODE_CTR;
1955 rctx->iv = req->iv;
1956
1957 pstat->aead_sha1_aes_dec++;
1958 return _qcrypto_queue_req(cp, &req->base);
1959}
1960
1961static int _qcrypto_aead_givencrypt_aes_ctr(struct aead_givcrypt_request *req)
1962{
1963 struct aead_request *areq = &req->areq;
1964 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1965 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
1966 struct crypto_priv *cp = ctx->cp;
1967 struct qcrypto_cipher_req_ctx *rctx;
1968 struct crypto_stat *pstat;
1969
1970 pstat = &_qcrypto_stat[cp->pdev->id];
1971
1972 rctx = aead_request_ctx(areq);
1973 rctx->aead = 1;
1974 rctx->alg = CIPHER_ALG_AES;
1975 rctx->dir = QCE_ENCRYPT;
1976 rctx->mode = QCE_MODE_CTR;
1977 rctx->iv = req->giv; /* generated iv */
1978
1979 memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
1980 /* avoid consecutive packets going out with same IV */
1981 *(__be64 *)req->giv ^= cpu_to_be64(req->seq);
1982 pstat->aead_sha1_aes_enc++;
1983 return _qcrypto_queue_req(cp, &areq->base);
1984};
1985#endif /* QCRYPTO_AEAD_AES_CTR */
1986
1987static int _qcrypto_aead_encrypt_des_cbc(struct aead_request *req)
1988{
1989 struct qcrypto_cipher_req_ctx *rctx;
1990 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1991 struct crypto_priv *cp = ctx->cp;
1992 struct crypto_stat *pstat;
1993
1994 pstat = &_qcrypto_stat[cp->pdev->id];
1995
1996 rctx = aead_request_ctx(req);
1997 rctx->aead = 1;
1998 rctx->alg = CIPHER_ALG_DES;
1999 rctx->dir = QCE_ENCRYPT;
2000 rctx->mode = QCE_MODE_CBC;
2001 rctx->iv = req->iv;
2002
2003 pstat->aead_sha1_des_enc++;
2004 return _qcrypto_queue_req(cp, &req->base);
2005}
2006
2007static int _qcrypto_aead_decrypt_des_cbc(struct aead_request *req)
2008{
2009 struct qcrypto_cipher_req_ctx *rctx;
2010 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
2011 struct crypto_priv *cp = ctx->cp;
2012 struct crypto_stat *pstat;
2013
2014 pstat = &_qcrypto_stat[cp->pdev->id];
2015
2016 rctx = aead_request_ctx(req);
2017 rctx->aead = 1;
2018 rctx->alg = CIPHER_ALG_DES;
2019 rctx->dir = QCE_DECRYPT;
2020 rctx->mode = QCE_MODE_CBC;
2021 rctx->iv = req->iv;
2022
2023 pstat->aead_sha1_des_dec++;
2024 return _qcrypto_queue_req(cp, &req->base);
2025}
2026
2027static int _qcrypto_aead_givencrypt_des_cbc(struct aead_givcrypt_request *req)
2028{
2029 struct aead_request *areq = &req->areq;
2030 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
2031 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
2032 struct crypto_priv *cp = ctx->cp;
2033 struct qcrypto_cipher_req_ctx *rctx;
2034 struct crypto_stat *pstat;
2035
2036 pstat = &_qcrypto_stat[cp->pdev->id];
2037
2038 rctx = aead_request_ctx(areq);
2039 rctx->aead = 1;
2040 rctx->alg = CIPHER_ALG_DES;
2041 rctx->dir = QCE_ENCRYPT;
2042 rctx->mode = QCE_MODE_CBC;
2043 rctx->iv = req->giv; /* generated iv */
2044
2045 memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
2046 /* avoid consecutive packets going out with same IV */
2047 *(__be64 *)req->giv ^= cpu_to_be64(req->seq);
2048 pstat->aead_sha1_des_enc++;
2049 return _qcrypto_queue_req(cp, &areq->base);
2050}
2051
2052static int _qcrypto_aead_encrypt_3des_cbc(struct aead_request *req)
2053{
2054 struct qcrypto_cipher_req_ctx *rctx;
2055 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
2056 struct crypto_priv *cp = ctx->cp;
2057 struct crypto_stat *pstat;
2058
2059 pstat = &_qcrypto_stat[cp->pdev->id];
2060
2061 rctx = aead_request_ctx(req);
2062 rctx->aead = 1;
2063 rctx->alg = CIPHER_ALG_3DES;
2064 rctx->dir = QCE_ENCRYPT;
2065 rctx->mode = QCE_MODE_CBC;
2066 rctx->iv = req->iv;
2067
2068 pstat->aead_sha1_3des_enc++;
2069 return _qcrypto_queue_req(cp, &req->base);
2070}
2071
2072static int _qcrypto_aead_decrypt_3des_cbc(struct aead_request *req)
2073{
2074 struct qcrypto_cipher_req_ctx *rctx;
2075 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
2076 struct crypto_priv *cp = ctx->cp;
2077 struct crypto_stat *pstat;
2078
2079 pstat = &_qcrypto_stat[cp->pdev->id];
2080
2081 rctx = aead_request_ctx(req);
2082 rctx->aead = 1;
2083 rctx->alg = CIPHER_ALG_3DES;
2084 rctx->dir = QCE_DECRYPT;
2085 rctx->mode = QCE_MODE_CBC;
2086 rctx->iv = req->iv;
2087
2088 pstat->aead_sha1_3des_dec++;
2089 return _qcrypto_queue_req(cp, &req->base);
2090}
2091
2092static int _qcrypto_aead_givencrypt_3des_cbc(struct aead_givcrypt_request *req)
2093{
2094 struct aead_request *areq = &req->areq;
2095 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
2096 struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
2097 struct crypto_priv *cp = ctx->cp;
2098 struct qcrypto_cipher_req_ctx *rctx;
2099 struct crypto_stat *pstat;
2100
2101 pstat = &_qcrypto_stat[cp->pdev->id];
2102
2103 rctx = aead_request_ctx(areq);
2104 rctx->aead = 1;
2105 rctx->alg = CIPHER_ALG_3DES;
2106 rctx->dir = QCE_ENCRYPT;
2107 rctx->mode = QCE_MODE_CBC;
2108 rctx->iv = req->giv; /* generated iv */
2109
2110 memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
2111 /* avoid consecutive packets going out with same IV */
2112 *(__be64 *)req->giv ^= cpu_to_be64(req->seq);
2113 pstat->aead_sha1_3des_enc++;
2114 return _qcrypto_queue_req(cp, &areq->base);
2115}
2116
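/* Count the scatterlist entries needed to cover nbytes of request data */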
2117static int qcrypto_count_sg(struct scatterlist *sg, int nbytes)
2118{
2119 int i;
2120
2121 for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
2122 nbytes -= sg->length;
2123
2124 return i;
2125}
2126
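/* Reset the per-transform hash bookkeeping ahead of a new digest */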
2127static int _sha_init(struct qcrypto_sha_ctx *ctx)
2128{
2129 ctx->first_blk = 1;
2130 ctx->last_blk = 0;
2131 ctx->byte_count[0] = 0;
2132 ctx->byte_count[1] = 0;
2133 ctx->byte_count[2] = 0;
2134 ctx->byte_count[3] = 0;
2135 ctx->trailing_buf_len = 0;
2136
2137 return 0;
2138};
2139
2140static int _sha1_init(struct ahash_request *req)
2141{
2142 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2143 struct crypto_priv *cp = sha_ctx->cp;
2144 struct crypto_stat *pstat;
2145
2146 pstat = &_qcrypto_stat[cp->pdev->id];
2147
2148 _sha_init(sha_ctx);
2149 sha_ctx->alg = QCE_HASH_SHA1;
2150
2151 memset(&sha_ctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE);
2152 memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
2153 SHA1_DIGEST_SIZE);
2154 sha_ctx->diglen = SHA1_DIGEST_SIZE;
2155 _update_sha1_ctx(req);
2156
2157 pstat->sha1_digest++;
2158 return 0;
2159};
2160
2161static int _sha256_init(struct ahash_request *req)
2162{
2163 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2164 struct crypto_priv *cp = sha_ctx->cp;
2165 struct crypto_stat *pstat;
2166
2167 pstat = &_qcrypto_stat[cp->pdev->id];
2168
2169 _sha_init(sha_ctx);
2170 sha_ctx->alg = QCE_HASH_SHA256;
2171
2172 memset(&sha_ctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE);
2173 memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
2174 SHA256_DIGEST_SIZE);
2175 sha_ctx->diglen = SHA256_DIGEST_SIZE;
2176 _update_sha256_ctx(req);
2177
2178 pstat->sha256_digest++;
2179 return 0;
2180};
2181
2182
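/*
 * export/import serialize the partial hash state (count, interim digest,
 * buffered block) so a hash transform can be suspended and resumed.
 */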
2183static int _sha1_export(struct ahash_request *req, void *out)
2184{
2185 struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
2186 struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx;
2187 struct sha1_state *out_ctx = (struct sha1_state *)out;
2188
2189 out_ctx->count = sha_state_ctx->count;
2190 memcpy(out_ctx->state, sha_state_ctx->state, sizeof(out_ctx->state));
2191 memcpy(out_ctx->buffer, sha_state_ctx->buffer, SHA1_BLOCK_SIZE);
2192
2193 return 0;
2194};
2195
2196static int _sha1_import(struct ahash_request *req, const void *in)
2197{
2198 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2199 struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
2200 struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx;
2201 struct sha1_state *in_ctx = (struct sha1_state *)in;
2202
2203 sha_state_ctx->count = in_ctx->count;
2204 memcpy(sha_state_ctx->state, in_ctx->state, sizeof(in_ctx->state));
2205 memcpy(sha_state_ctx->buffer, in_ctx->buffer, SHA1_BLOCK_SIZE);
2206 memcpy(sha_ctx->trailing_buf, in_ctx->buffer, SHA1_BLOCK_SIZE);
2207
2208 sha_ctx->byte_count[0] = (uint32_t)(in_ctx->count & 0xFFFFFFC0);
2209 sha_ctx->byte_count[1] = (uint32_t)(in_ctx->count >> 32);
2210 _words_to_byte_stream(in_ctx->state, sha_ctx->digest, sha_ctx->diglen);
2211
2212 sha_ctx->trailing_buf_len = (uint32_t)(in_ctx->count &
2213 (SHA1_BLOCK_SIZE-1));
2214
2215 if (!(in_ctx->count))
2216 sha_ctx->first_blk = 1;
2217 else
2218 sha_ctx->first_blk = 0;
2219
2220 return 0;
2221}
2222static int _sha256_export(struct ahash_request *req, void *out)
2223{
2224 struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
2225 struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx;
2226 struct sha256_state *out_ctx = (struct sha256_state *)out;
2227
2228 out_ctx->count = sha_state_ctx->count;
2229 memcpy(out_ctx->state, sha_state_ctx->state, sizeof(out_ctx->state));
2230 memcpy(out_ctx->buf, sha_state_ctx->buf, SHA256_BLOCK_SIZE);
2231
2232 return 0;
2233};
2234
2235static int _sha256_import(struct ahash_request *req, const void *in)
2236{
2237 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2238 struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
2239 struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx;
2240 struct sha256_state *in_ctx = (struct sha256_state *)in;
2241
2242 sha_state_ctx->count = in_ctx->count;
2243 memcpy(sha_state_ctx->state, in_ctx->state, sizeof(in_ctx->state));
2244 memcpy(sha_state_ctx->buf, in_ctx->buf, SHA256_BLOCK_SIZE);
2245 memcpy(sha_ctx->trailing_buf, in_ctx->buf, SHA256_BLOCK_SIZE);
2246
2247 sha_ctx->byte_count[0] = (uint32_t)(in_ctx->count & 0xFFFFFFC0);
2248 sha_ctx->byte_count[1] = (uint32_t)(in_ctx->count >> 32);
2249 _words_to_byte_stream(in_ctx->state, sha_ctx->digest, sha_ctx->diglen);
2250
2251 sha_ctx->trailing_buf_len = (uint32_t)(in_ctx->count &
2252 (SHA256_BLOCK_SIZE-1));
2253
2254 if (!(in_ctx->count))
2255 sha_ctx->first_blk = 1;
2256 else
2257 sha_ctx->first_blk = 0;
2258
2259 return 0;
2260}
2261
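/*
 * Some crypto engines can only DMA from contiguous, aligned buffers.
 * Flatten the request scatterlist into a single bounce buffer and point
 * req->src at it; the original list is preserved in orig_src.
 */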
2262static void _copy_source(struct ahash_request *req)
2263{
2264 struct qcrypto_sha_req_ctx *srctx = NULL;
2265 uint32_t bytes = 0;
2266 struct scatterlist *sg = req->src;
2267
2268 srctx = ahash_request_ctx(req);
2269 srctx->orig_src = req->src;
2270	srctx->data = kzalloc((req->nbytes + 64), GFP_KERNEL);
	if (srctx->data == NULL)
		return;	/* no bounce buffer available; leave req->src untouched */
2271 for (sg = req->src; bytes != req->nbytes;
2272 sg++) {
2273 memcpy(((char *)srctx->data + bytes),
2274 sg_virt(sg), sg->length);
2275 bytes += sg->length;
2276 }
2277 sg_set_buf(&srctx->dsg, srctx->data,
2278 req->nbytes);
2279 sg_mark_end(&srctx->dsg);
2280 req->src = &srctx->dsg;
2281}
2282
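/*
 * Hash updates are staged through a trailing buffer: bytes that do not
 * fill a whole hash block are carried over to the next call, and only
 * full blocks are queued to the crypto engine.
 */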
2283static int _sha_update(struct ahash_request *req, uint32_t sha_block_size)
2284{
2285 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2286 struct crypto_priv *cp = sha_ctx->cp;
2287 struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
2288 uint32_t total, len, i, num_sg;
2289 uint8_t *k_src = NULL;
2290 uint32_t sha_pad_len = 0;
2291 uint32_t end_src = 0;
2292 uint32_t trailing_buf_len = 0;
2293 uint32_t nbytes, index = 0;
2294 uint32_t saved_length = 0;
2295 int ret = 0;
2296
2297 /* check for trailing buffer from previous updates and append it */
2298 total = req->nbytes + sha_ctx->trailing_buf_len;
2299 len = req->nbytes;
2300
2301 if (total <= sha_block_size) {
2302 i = 0;
2303
2304 k_src = &sha_ctx->trailing_buf[sha_ctx->trailing_buf_len];
2305 while (len > 0) {
2306 memcpy(k_src, sg_virt(&req->src[i]),
2307 req->src[i].length);
2308 len -= req->src[i].length;
2309 k_src += req->src[i].length;
2310 i++;
2311 }
2312 sha_ctx->trailing_buf_len = total;
2313 if (sha_ctx->alg == QCE_HASH_SHA1)
2314 _update_sha1_ctx(req);
2315 if (sha_ctx->alg == QCE_HASH_SHA256)
2316 _update_sha256_ctx(req);
2317 return 0;
2318 }
2319
2320 /* save the original req structure fields*/
2321 rctx->src = req->src;
2322 rctx->nbytes = req->nbytes;
2323
2324 memcpy(sha_ctx->tmp_tbuf, sha_ctx->trailing_buf,
2325 sha_ctx->trailing_buf_len);
2326 k_src = &sha_ctx->trailing_buf[0];
2327 /* get new trailing buffer */
2328 sha_pad_len = ALIGN(total, sha_block_size) - total;
2329 trailing_buf_len = sha_block_size - sha_pad_len;
2330 nbytes = total - trailing_buf_len;
2331 num_sg = qcrypto_count_sg(req->src, req->nbytes);
2332
2333 len = sha_ctx->trailing_buf_len;
2334 i = 0;
2335
2336 while (len < nbytes) {
2337 if ((len + req->src[i].length) > nbytes)
2338 break;
2339 len += req->src[i].length;
2340 i++;
2341 }
2342
2343 end_src = i;
2344 if (len < nbytes) {
2345 uint32_t remnant = (nbytes - len);
2346 memcpy(k_src, (sg_virt(&req->src[i]) + remnant),
2347 (req->src[i].length - remnant));
2348 k_src += (req->src[i].length - remnant);
2349 saved_length = req->src[i].length;
2350 index = i;
2351 req->src[i].length = remnant;
2352 i++;
2353 }
2354
2355 while (i < num_sg) {
2356 memcpy(k_src, sg_virt(&req->src[i]), req->src[i].length);
2357 k_src += req->src[i].length;
2358 i++;
2359 }
2360
2361 if (sha_ctx->trailing_buf_len) {
2362		if (cp->ce_support.aligned_only) {
2363			sha_ctx->sg = kzalloc(sizeof(struct scatterlist),
2364					GFP_KERNEL);
2365			if (sha_ctx->sg == NULL) {
2366 pr_err("MemAlloc fail sha_ctx->sg, error %ld\n",
2367 PTR_ERR(sha_ctx->sg));
2368 return -ENOMEM;
2369 }
2370 rctx->data2 = kzalloc((req->nbytes + 64), GFP_KERNEL);
2371 if (rctx->data2 == NULL) {
2372 pr_err("Mem Alloc fail srctx->data2, err %ld\n",
2373 PTR_ERR(rctx->data2));
2374 kfree(sha_ctx->sg);
2375 return -ENOMEM;
2376 }
2377 memcpy(rctx->data2, sha_ctx->tmp_tbuf,
2378					sha_ctx->trailing_buf_len);
2379			memcpy((rctx->data2 + sha_ctx->trailing_buf_len),
2380 rctx->data, req->src[i-1].length);
2381 kfree(rctx->data);
2382 rctx->data = rctx->data2;
2383 sg_set_buf(&sha_ctx->sg[0], rctx->data,
2384 (sha_ctx->trailing_buf_len +
2385 req->src[i-1].length));
2386 req->src = sha_ctx->sg;
2387 sg_mark_end(&sha_ctx->sg[0]);
2388
2389		} else {
2390 num_sg = end_src + 2;
2391
2392 sha_ctx->sg = kzalloc(num_sg *
2393 (sizeof(struct scatterlist)), GFP_KERNEL);
2394 if (sha_ctx->sg == NULL) {
2395 pr_err("MEMalloc fail sha_ctx->sg, error %ld\n",
2396 PTR_ERR(sha_ctx->sg));
2397 return -ENOMEM;
2398 }
2399
2400 sg_set_buf(&sha_ctx->sg[0], sha_ctx->tmp_tbuf,
2401 sha_ctx->trailing_buf_len);
2402 for (i = 1; i < num_sg; i++)
2403 sg_set_buf(&sha_ctx->sg[i],
2404 sg_virt(&req->src[i-1]),
2405 req->src[i-1].length);
2406
2407 req->src = sha_ctx->sg;
2408 sg_mark_end(&sha_ctx->sg[num_sg - 1]);
2409
2410 }
2411	} else
2412 sg_mark_end(&req->src[end_src]);
2413
2414 req->nbytes = nbytes;
2415 if (saved_length > 0)
2416 rctx->src[index].length = saved_length;
2417 sha_ctx->trailing_buf_len = trailing_buf_len;
2418
2419 ret = _qcrypto_queue_req(cp, &req->base);
2420
2421 return ret;
2422};
2423
2424static int _sha1_update(struct ahash_request *req)
2425{
2426 struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
2427 struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx;
2428	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2429 struct crypto_priv *cp = sha_ctx->cp;
2430
2431 if (cp->ce_support.aligned_only)
2432 _copy_source(req);
2433
2434 sha_state_ctx->count += req->nbytes;
2435 return _sha_update(req, SHA1_BLOCK_SIZE);
2436}
2437
2438static int _sha256_update(struct ahash_request *req)
2439{
2440 struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
2441 struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx;
2442	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2443 struct crypto_priv *cp = sha_ctx->cp;
2444
2445 if (cp->ce_support.aligned_only)
2446 _copy_source(req);
2447
2448 sha_state_ctx->count += req->nbytes;
2449 return _sha_update(req, SHA256_BLOCK_SIZE);
2450}
2451
2452static int _sha_final(struct ahash_request *req, uint32_t sha_block_size)
2453{
2454 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2455 struct crypto_priv *cp = sha_ctx->cp;
2456 struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
2457 int ret = 0;
2458
2459	if (cp->ce_support.aligned_only)
2460 _copy_source(req);
2461
2462	sha_ctx->last_blk = 1;
2463
2464 /* save the original req structure fields*/
2465 rctx->src = req->src;
2466 rctx->nbytes = req->nbytes;
2467
2468 sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->trailing_buf,
2469 sha_ctx->trailing_buf_len);
2470 sg_mark_end(&sha_ctx->tmp_sg);
2471
2472 req->src = &sha_ctx->tmp_sg;
2473 req->nbytes = sha_ctx->trailing_buf_len;
2474
2475 ret = _qcrypto_queue_req(cp, &req->base);
2476
2477 return ret;
2478};
2479
2480static int _sha1_final(struct ahash_request *req)
2481{
2482 return _sha_final(req, SHA1_BLOCK_SIZE);
2483}
2484
2485static int _sha256_final(struct ahash_request *req)
2486{
2487 return _sha_final(req, SHA256_BLOCK_SIZE);
2488}
2489
2490static int _sha_digest(struct ahash_request *req)
2491{
2492 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2493 struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
2494 struct crypto_priv *cp = sha_ctx->cp;
2495 int ret = 0;
2496
2497	if (cp->ce_support.aligned_only)
2498 _copy_source(req);
2499
2500	/* save the original req structure fields*/
2501 rctx->src = req->src;
2502 rctx->nbytes = req->nbytes;
2503	sha_ctx->first_blk = 1;
2504	sha_ctx->last_blk = 1;
2505 ret = _qcrypto_queue_req(cp, &req->base);
2506
2507 return ret;
2508}
2509
2510static int _sha1_digest(struct ahash_request *req)
2511{
2512 _sha1_init(req);
2513 return _sha_digest(req);
2514}
2515
2516static int _sha256_digest(struct ahash_request *req)
2517{
2518 _sha256_init(req);
2519 return _sha_digest(req);
2520}
2521
2522static void _crypto_sha_hmac_ahash_req_complete(
2523 struct crypto_async_request *req, int err)
2524{
2525 struct completion *ahash_req_complete = req->data;
2526
2527 if (err == -EINPROGRESS)
2528 return;
2529 complete(ahash_req_complete);
2530}
2531
2532static int _sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
2533 unsigned int len)
2534{
2535 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
2536 int ret = 0;
2537
2538	sha_ctx->in_buf = kzalloc(len + 64, GFP_KERNEL);
2539	if (sha_ctx->in_buf == NULL) {
2540 pr_err("qcrypto Can't Allocate mem: sha_ctx->in_buf, error %ld\n",
2541 PTR_ERR(sha_ctx->in_buf));
2542 return -ENOMEM;
2543 }
2544 memcpy(sha_ctx->in_buf, key, len);
2545 sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->in_buf, len);
2546 sg_mark_end(&sha_ctx->tmp_sg);
2547
2548 ahash_request_set_crypt(sha_ctx->ahash_req, &sha_ctx->tmp_sg,
2549 &sha_ctx->authkey[0], len);
2550
2551 ret = _sha_digest(sha_ctx->ahash_req);
2552 if (ret == -EINPROGRESS || ret == -EBUSY) {
2553 ret =
2554 wait_for_completion_interruptible(
2555 &sha_ctx->ahash_req_complete);
2556 INIT_COMPLETION(sha_ctx->ahash_req_complete);
2557 }
2558
2559 sha_ctx->authkey_in_len = len;
2560 kfree(sha_ctx->in_buf);
2561 sha_ctx->in_buf = NULL;
2562
2563 return ret;
2564}
2565
2566static int _sha1_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
2567 unsigned int len)
2568{
2569 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
2570
2571	if (len <= SHA1_BLOCK_SIZE) {
2572		memcpy(&sha_ctx->authkey[0], key, len);
		/* zero-pad short keys as RFC 2104 requires */
		memset(&sha_ctx->authkey[len], 0, SHA1_BLOCK_SIZE - len);
2573	} else {
2574 _sha_init(sha_ctx);
2575 sha_ctx->alg = QCE_HASH_SHA1;
2576 memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
2577 SHA1_DIGEST_SIZE);
2578 sha_ctx->diglen = SHA1_DIGEST_SIZE;
2579 _sha_hmac_setkey(tfm, key, len);
2580 }
2581 return 0;
2582}
2583
2584static int _sha256_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
2585 unsigned int len)
2586{
2587 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
2588
2589	if (len <= SHA256_BLOCK_SIZE) {
2590		memcpy(&sha_ctx->authkey[0], key, len);
		/* zero-pad short keys as RFC 2104 requires */
		memset(&sha_ctx->authkey[len], 0, SHA256_BLOCK_SIZE - len);
2591	} else {
2592 _sha_init(sha_ctx);
2593 sha_ctx->alg = QCE_HASH_SHA256;
2594 memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
2595 SHA256_DIGEST_SIZE);
2596 sha_ctx->diglen = SHA256_DIGEST_SIZE;
2597 _sha_hmac_setkey(tfm, key, len);
2598 }
2599
2600 return 0;
2601}
2602
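/* RFC 2104 inner hash setup: XOR the key with the 0x36 ipad byte */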
2603static int _sha_hmac_init_ihash(struct ahash_request *req,
2604 uint32_t sha_block_size)
2605{
2606 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2607 int i;
2608
2609 for (i = 0; i < sha_block_size; i++)
2610 sha_ctx->trailing_buf[i] = sha_ctx->authkey[i] ^ 0x36;
2611 sha_ctx->trailing_buf_len = sha_block_size;
2612
2613 return 0;
2614}
2615
2616static int _sha1_hmac_init(struct ahash_request *req)
2617{
2618 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2619 struct crypto_priv *cp = sha_ctx->cp;
2620 struct crypto_stat *pstat;
2621 int ret = 0;
2622
2623 pstat = &_qcrypto_stat[cp->pdev->id];
2624 pstat->sha1_hmac_digest++;
2625
2626 _sha_init(sha_ctx);
2627 memset(&sha_ctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE);
2628 memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
2629 SHA1_DIGEST_SIZE);
2630 sha_ctx->diglen = SHA1_DIGEST_SIZE;
2631 _update_sha1_ctx(req);
2632
2633 if (cp->ce_support.sha_hmac)
2634 sha_ctx->alg = QCE_HASH_SHA1_HMAC;
2635 else {
2636 sha_ctx->alg = QCE_HASH_SHA1;
2637 ret = _sha_hmac_init_ihash(req, SHA1_BLOCK_SIZE);
2638 }
2639
2640 return ret;
2641}
2642
2643static int _sha256_hmac_init(struct ahash_request *req)
2644{
2645 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2646 struct crypto_priv *cp = sha_ctx->cp;
2647 struct crypto_stat *pstat;
2648 int ret = 0;
2649
2650 pstat = &_qcrypto_stat[cp->pdev->id];
2651 pstat->sha256_hmac_digest++;
2652
2653 _sha_init(sha_ctx);
2654 memset(&sha_ctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE);
2655 memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
2656 SHA256_DIGEST_SIZE);
2657 sha_ctx->diglen = SHA256_DIGEST_SIZE;
2658 _update_sha256_ctx(req);
2659
2660 if (cp->ce_support.sha_hmac)
2661 sha_ctx->alg = QCE_HASH_SHA256_HMAC;
2662 else {
2663 sha_ctx->alg = QCE_HASH_SHA256;
2664 ret = _sha_hmac_init_ihash(req, SHA256_BLOCK_SIZE);
2665 }
2666
2667 return ret;
2668}
2669
2670static int _sha1_hmac_update(struct ahash_request *req)
2671{
2672 return _sha1_update(req);
2673}
2674
2675static int _sha256_hmac_update(struct ahash_request *req)
2676{
2677 return _sha256_update(req);
2678}
2679
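/* RFC 2104 outer hash: H((K ^ opad) || inner digest), with opad byte 0x5c */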
2680static int _sha_hmac_outer_hash(struct ahash_request *req,
2681 uint32_t sha_digest_size, uint32_t sha_block_size)
2682{
2683 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2684 struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
2685 struct crypto_priv *cp = sha_ctx->cp;
2686 int i;
2687
2688 for (i = 0; i < sha_block_size; i++)
2689 sha_ctx->tmp_tbuf[i] = sha_ctx->authkey[i] ^ 0x5c;
2690
2691 /* save the original req structure fields*/
2692 rctx->src = req->src;
2693 rctx->nbytes = req->nbytes;
2694
2695 memcpy(&sha_ctx->tmp_tbuf[sha_block_size], &sha_ctx->digest[0],
2696 sha_digest_size);
2697
2698 sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->tmp_tbuf, sha_block_size +
2699 sha_digest_size);
2700 sg_mark_end(&sha_ctx->tmp_sg);
2701 req->src = &sha_ctx->tmp_sg;
2702 req->nbytes = sha_block_size + sha_digest_size;
2703
2704 _sha_init(sha_ctx);
2705 if (sha_ctx->alg == QCE_HASH_SHA1) {
2706 memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
2707 SHA1_DIGEST_SIZE);
2708 sha_ctx->diglen = SHA1_DIGEST_SIZE;
2709 } else {
2710 memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
2711 SHA256_DIGEST_SIZE);
2712 sha_ctx->diglen = SHA256_DIGEST_SIZE;
2713 }
2714
2715 sha_ctx->last_blk = 1;
2716 return _qcrypto_queue_req(cp, &req->base);
2717}
2718
2719static int _sha_hmac_inner_hash(struct ahash_request *req,
2720 uint32_t sha_digest_size, uint32_t sha_block_size)
2721{
2722 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2723 struct ahash_request *areq = sha_ctx->ahash_req;
2724 struct crypto_priv *cp = sha_ctx->cp;
2725 int ret = 0;
2726
2727 sha_ctx->last_blk = 1;
2728
2729 sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->trailing_buf,
2730 sha_ctx->trailing_buf_len);
2731 sg_mark_end(&sha_ctx->tmp_sg);
2732
2733 ahash_request_set_crypt(areq, &sha_ctx->tmp_sg, &sha_ctx->digest[0],
2734 sha_ctx->trailing_buf_len);
2735 sha_ctx->last_blk = 1;
2736 ret = _qcrypto_queue_req(cp, &areq->base);
2737
2738 if (ret == -EINPROGRESS || ret == -EBUSY) {
2739 ret =
2740 wait_for_completion_interruptible(&sha_ctx->ahash_req_complete);
2741 INIT_COMPLETION(sha_ctx->ahash_req_complete);
2742 }
2743
2744 return ret;
2745}
2746
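/*
 * With native HMAC support the engine finalizes in one pass; otherwise
 * finish the inner hash first, then run the outer hash over its digest.
 */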
2747static int _sha1_hmac_final(struct ahash_request *req)
2748{
2749 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2750 struct crypto_priv *cp = sha_ctx->cp;
2751 int ret = 0;
2752
2753 if (cp->ce_support.sha_hmac)
2754 return _sha_final(req, SHA1_BLOCK_SIZE);
2755 else {
2756 ret = _sha_hmac_inner_hash(req, SHA1_DIGEST_SIZE,
2757 SHA1_BLOCK_SIZE);
2758 if (ret)
2759 return ret;
2760 return _sha_hmac_outer_hash(req, SHA1_DIGEST_SIZE,
2761 SHA1_BLOCK_SIZE);
2762 }
2763}
2764
2765static int _sha256_hmac_final(struct ahash_request *req)
2766{
2767 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2768 struct crypto_priv *cp = sha_ctx->cp;
2769 int ret = 0;
2770
2771 if (cp->ce_support.sha_hmac)
2772 return _sha_final(req, SHA256_BLOCK_SIZE);
2773 else {
2774 ret = _sha_hmac_inner_hash(req, SHA256_DIGEST_SIZE,
2775 SHA256_BLOCK_SIZE);
2776 if (ret)
2777 return ret;
2778 return _sha_hmac_outer_hash(req, SHA256_DIGEST_SIZE,
2779 SHA256_BLOCK_SIZE);
2780 }
2782}
2783
2784
2785static int _sha1_hmac_digest(struct ahash_request *req)
2786{
2787 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2788 struct crypto_priv *cp = sha_ctx->cp;
2789 struct crypto_stat *pstat;
2790
2791 pstat = &_qcrypto_stat[cp->pdev->id];
2792 pstat->sha1_hmac_digest++;
2793
2794 _sha_init(sha_ctx);
2795 memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0],
2796 SHA1_DIGEST_SIZE);
2797 sha_ctx->diglen = SHA1_DIGEST_SIZE;
2798 sha_ctx->alg = QCE_HASH_SHA1_HMAC;
2799
2800 return _sha_digest(req);
2801}
2802
2803static int _sha256_hmac_digest(struct ahash_request *req)
2804{
2805 struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
2806 struct crypto_priv *cp = sha_ctx->cp;
2807 struct crypto_stat *pstat;
2808
2809 pstat = &_qcrypto_stat[cp->pdev->id];
2810 pstat->sha256_hmac_digest++;
2811
2812 _sha_init(sha_ctx);
2813 memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0],
2814 SHA256_DIGEST_SIZE);
2815 sha_ctx->diglen = SHA256_DIGEST_SIZE;
2816 sha_ctx->alg = QCE_HASH_SHA256_HMAC;
2817
2818 return _sha_digest(req);
2819}
2820
2821static struct ahash_alg _qcrypto_ahash_algos[] = {
2822 {
2823 .init = _sha1_init,
2824 .update = _sha1_update,
2825 .final = _sha1_final,
2826 .export = _sha1_export,
2827 .import = _sha1_import,
2828 .digest = _sha1_digest,
2829 .halg = {
2830 .digestsize = SHA1_DIGEST_SIZE,
2831 .statesize = sizeof(struct sha1_state),
2832 .base = {
2833 .cra_name = "sha1",
2834 .cra_driver_name = "qcrypto-sha1",
2835 .cra_priority = 300,
2836 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2837 CRYPTO_ALG_ASYNC,
2838 .cra_blocksize = SHA1_BLOCK_SIZE,
2839 .cra_ctxsize =
2840 sizeof(struct qcrypto_sha_ctx),
2841 .cra_alignmask = 0,
2842 .cra_type = &crypto_ahash_type,
2843 .cra_module = THIS_MODULE,
2844 .cra_init = _qcrypto_ahash_cra_init,
2845 .cra_exit = _qcrypto_ahash_cra_exit,
2846 },
2847 },
2848 },
2849 {
2850 .init = _sha256_init,
2851 .update = _sha256_update,
2852 .final = _sha256_final,
2853 .export = _sha256_export,
2854 .import = _sha256_import,
2855 .digest = _sha256_digest,
2856 .halg = {
2857 .digestsize = SHA256_DIGEST_SIZE,
2858 .statesize = sizeof(struct sha256_state),
2859 .base = {
2860 .cra_name = "sha256",
2861 .cra_driver_name = "qcrypto-sha256",
2862 .cra_priority = 300,
2863 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2864 CRYPTO_ALG_ASYNC,
2865 .cra_blocksize = SHA256_BLOCK_SIZE,
2866 .cra_ctxsize =
2867 sizeof(struct qcrypto_sha_ctx),
2868 .cra_alignmask = 0,
2869 .cra_type = &crypto_ahash_type,
2870 .cra_module = THIS_MODULE,
2871 .cra_init = _qcrypto_ahash_cra_init,
2872 .cra_exit = _qcrypto_ahash_cra_exit,
2873 },
2874 },
2875 },
2876};
2877
2878static struct ahash_alg _qcrypto_sha_hmac_algos[] = {
2879 {
2880 .init = _sha1_hmac_init,
2881 .update = _sha1_hmac_update,
2882 .final = _sha1_hmac_final,
2883 .export = _sha1_export,
2884 .import = _sha1_import,
2885 .digest = _sha1_hmac_digest,
2886 .setkey = _sha1_hmac_setkey,
2887 .halg = {
2888 .digestsize = SHA1_DIGEST_SIZE,
2889 .statesize = sizeof(struct sha1_state),
2890 .base = {
2891 .cra_name = "hmac(sha1)",
2892 .cra_driver_name = "qcrypto-hmac-sha1",
2893 .cra_priority = 300,
2894 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2895 CRYPTO_ALG_ASYNC,
2896 .cra_blocksize = SHA1_BLOCK_SIZE,
2897 .cra_ctxsize =
2898 sizeof(struct qcrypto_sha_ctx),
2899 .cra_alignmask = 0,
2900 .cra_type = &crypto_ahash_type,
2901 .cra_module = THIS_MODULE,
2902 .cra_init = _qcrypto_ahash_hmac_cra_init,
2903 .cra_exit = _qcrypto_ahash_cra_exit,
2904 },
2905 },
2906 },
2907 {
2908 .init = _sha256_hmac_init,
2909 .update = _sha256_hmac_update,
2910 .final = _sha256_hmac_final,
2911 .export = _sha256_export,
2912 .import = _sha256_import,
2913 .digest = _sha256_hmac_digest,
2914 .setkey = _sha256_hmac_setkey,
2915 .halg = {
2916 .digestsize = SHA256_DIGEST_SIZE,
2917 .statesize = sizeof(struct sha256_state),
2918 .base = {
2919 .cra_name = "hmac(sha256)",
2920 .cra_driver_name = "qcrypto-hmac-sha256",
2921 .cra_priority = 300,
2922 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2923 CRYPTO_ALG_ASYNC,
2924 .cra_blocksize = SHA256_BLOCK_SIZE,
2925 .cra_ctxsize =
2926 sizeof(struct qcrypto_sha_ctx),
2927 .cra_alignmask = 0,
2928 .cra_type = &crypto_ahash_type,
2929 .cra_module = THIS_MODULE,
2930 .cra_init = _qcrypto_ahash_hmac_cra_init,
2931 .cra_exit = _qcrypto_ahash_cra_exit,
2932 },
2933 },
2934 },
2935};
2936
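/*
 * Base block-cipher algorithms, registered unconditionally at probe;
 * the XTS, AEAD and CCM variants below are added only when the engine
 * reports support for them.
 */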
2937static struct crypto_alg _qcrypto_ablk_cipher_algos[] = {
2938 {
2939 .cra_name = "ecb(aes)",
2940 .cra_driver_name = "qcrypto-ecb-aes",
2941 .cra_priority = 300,
2942 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
2943 .cra_blocksize = AES_BLOCK_SIZE,
2944 .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
2945 .cra_alignmask = 0,
2946 .cra_type = &crypto_ablkcipher_type,
2947 .cra_module = THIS_MODULE,
2948 .cra_init = _qcrypto_cra_ablkcipher_init,
2949	.cra_exit = _qcrypto_cra_ablkcipher_exit,
2950	.cra_u = {
2951 .ablkcipher = {
2952 .min_keysize = AES_MIN_KEY_SIZE,
2953 .max_keysize = AES_MAX_KEY_SIZE,
2954 .setkey = _qcrypto_setkey_aes,
2955 .encrypt = _qcrypto_enc_aes_ecb,
2956 .decrypt = _qcrypto_dec_aes_ecb,
2957 },
2958 },
2959 },
2960 {
2961 .cra_name = "cbc(aes)",
2962 .cra_driver_name = "qcrypto-cbc-aes",
2963 .cra_priority = 300,
2964 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
2965 .cra_blocksize = AES_BLOCK_SIZE,
2966 .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
2967 .cra_alignmask = 0,
2968 .cra_type = &crypto_ablkcipher_type,
2969 .cra_module = THIS_MODULE,
2970 .cra_init = _qcrypto_cra_ablkcipher_init,
2971	.cra_exit = _qcrypto_cra_ablkcipher_exit,
2972	.cra_u = {
2973 .ablkcipher = {
2974 .ivsize = AES_BLOCK_SIZE,
2975 .min_keysize = AES_MIN_KEY_SIZE,
2976 .max_keysize = AES_MAX_KEY_SIZE,
2977 .setkey = _qcrypto_setkey_aes,
2978 .encrypt = _qcrypto_enc_aes_cbc,
2979 .decrypt = _qcrypto_dec_aes_cbc,
2980 },
2981 },
2982 },
2983 {
2984 .cra_name = "ctr(aes)",
2985 .cra_driver_name = "qcrypto-ctr-aes",
2986 .cra_priority = 300,
2987 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
2988 .cra_blocksize = AES_BLOCK_SIZE,
2989 .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
2990 .cra_alignmask = 0,
2991 .cra_type = &crypto_ablkcipher_type,
2992 .cra_module = THIS_MODULE,
2993 .cra_init = _qcrypto_cra_ablkcipher_init,
2994	.cra_exit = _qcrypto_cra_ablkcipher_exit,
2995	.cra_u = {
2996 .ablkcipher = {
2997 .ivsize = AES_BLOCK_SIZE,
2998 .min_keysize = AES_MIN_KEY_SIZE,
2999 .max_keysize = AES_MAX_KEY_SIZE,
3000 .setkey = _qcrypto_setkey_aes,
3001 .encrypt = _qcrypto_enc_aes_ctr,
3002 .decrypt = _qcrypto_dec_aes_ctr,
3003 },
3004 },
3005 },
3006 {
3007 .cra_name = "ecb(des)",
3008 .cra_driver_name = "qcrypto-ecb-des",
3009 .cra_priority = 300,
3010 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3011 .cra_blocksize = DES_BLOCK_SIZE,
3012 .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
3013 .cra_alignmask = 0,
3014 .cra_type = &crypto_ablkcipher_type,
3015 .cra_module = THIS_MODULE,
3016 .cra_init = _qcrypto_cra_ablkcipher_init,
3017	.cra_exit = _qcrypto_cra_ablkcipher_exit,
3018	.cra_u = {
3019 .ablkcipher = {
3020 .min_keysize = DES_KEY_SIZE,
3021 .max_keysize = DES_KEY_SIZE,
3022 .setkey = _qcrypto_setkey_des,
3023 .encrypt = _qcrypto_enc_des_ecb,
3024 .decrypt = _qcrypto_dec_des_ecb,
3025 },
3026 },
3027 },
3028 {
3029 .cra_name = "cbc(des)",
3030 .cra_driver_name = "qcrypto-cbc-des",
3031 .cra_priority = 300,
3032 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3033 .cra_blocksize = DES_BLOCK_SIZE,
3034 .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
3035 .cra_alignmask = 0,
3036 .cra_type = &crypto_ablkcipher_type,
3037 .cra_module = THIS_MODULE,
3038 .cra_init = _qcrypto_cra_ablkcipher_init,
3039	.cra_exit = _qcrypto_cra_ablkcipher_exit,
3040	.cra_u = {
3041 .ablkcipher = {
3042 .ivsize = DES_BLOCK_SIZE,
3043 .min_keysize = DES_KEY_SIZE,
3044 .max_keysize = DES_KEY_SIZE,
3045 .setkey = _qcrypto_setkey_des,
3046 .encrypt = _qcrypto_enc_des_cbc,
3047 .decrypt = _qcrypto_dec_des_cbc,
3048 },
3049 },
3050 },
3051 {
3052 .cra_name = "ecb(des3_ede)",
3053 .cra_driver_name = "qcrypto-ecb-3des",
3054 .cra_priority = 300,
3055 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3056 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3057 .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
3058 .cra_alignmask = 0,
3059 .cra_type = &crypto_ablkcipher_type,
3060 .cra_module = THIS_MODULE,
3061 .cra_init = _qcrypto_cra_ablkcipher_init,
3062	.cra_exit = _qcrypto_cra_ablkcipher_exit,
3063	.cra_u = {
3064 .ablkcipher = {
3065 .min_keysize = DES3_EDE_KEY_SIZE,
3066 .max_keysize = DES3_EDE_KEY_SIZE,
3067 .setkey = _qcrypto_setkey_3des,
3068 .encrypt = _qcrypto_enc_3des_ecb,
3069 .decrypt = _qcrypto_dec_3des_ecb,
3070 },
3071 },
3072 },
3073 {
3074 .cra_name = "cbc(des3_ede)",
3075 .cra_driver_name = "qcrypto-cbc-3des",
3076 .cra_priority = 300,
3077 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3078 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3079 .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
3080 .cra_alignmask = 0,
3081 .cra_type = &crypto_ablkcipher_type,
3082 .cra_module = THIS_MODULE,
3083 .cra_init = _qcrypto_cra_ablkcipher_init,
3084	.cra_exit = _qcrypto_cra_ablkcipher_exit,
3085	.cra_u = {
3086 .ablkcipher = {
3087 .ivsize = DES3_EDE_BLOCK_SIZE,
3088 .min_keysize = DES3_EDE_KEY_SIZE,
3089 .max_keysize = DES3_EDE_KEY_SIZE,
3090 .setkey = _qcrypto_setkey_3des,
3091 .encrypt = _qcrypto_enc_3des_cbc,
3092 .decrypt = _qcrypto_dec_3des_cbc,
3093 },
3094 },
3095 },
3096};
3097
3098static struct crypto_alg _qcrypto_ablk_cipher_xts_algo = {
3099 .cra_name = "xts(aes)",
3100 .cra_driver_name = "qcrypto-xts-aes",
3101 .cra_priority = 300,
3102 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3103 .cra_blocksize = AES_BLOCK_SIZE,
3104 .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
3105 .cra_alignmask = 0,
3106 .cra_type = &crypto_ablkcipher_type,
3107 .cra_module = THIS_MODULE,
3108 .cra_init = _qcrypto_cra_ablkcipher_init,
3109	.cra_exit = _qcrypto_cra_ablkcipher_exit,
3110	.cra_u = {
3111 .ablkcipher = {
3112 .ivsize = AES_BLOCK_SIZE,
3113 .min_keysize = AES_MIN_KEY_SIZE,
3114 .max_keysize = AES_MAX_KEY_SIZE,
3115 .setkey = _qcrypto_setkey_aes,
3116 .encrypt = _qcrypto_enc_aes_xts,
3117 .decrypt = _qcrypto_dec_aes_xts,
3118 },
3119 },
3120};
3121
3122static struct crypto_alg _qcrypto_aead_sha1_hmac_algos[] = {
3123 {
3124 .cra_name = "authenc(hmac(sha1),cbc(aes))",
3125 .cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-aes",
3126 .cra_priority = 300,
3127 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
3128 .cra_blocksize = AES_BLOCK_SIZE,
3129 .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
3130 .cra_alignmask = 0,
3131 .cra_type = &crypto_aead_type,
3132 .cra_module = THIS_MODULE,
3133 .cra_init = _qcrypto_cra_aead_init,
3134	.cra_exit = _qcrypto_cra_aead_exit,
3135	.cra_u = {
3136 .aead = {
3137 .ivsize = AES_BLOCK_SIZE,
3138 .maxauthsize = SHA1_DIGEST_SIZE,
3139 .setkey = _qcrypto_aead_setkey,
3140 .setauthsize = _qcrypto_aead_setauthsize,
3141 .encrypt = _qcrypto_aead_encrypt_aes_cbc,
3142 .decrypt = _qcrypto_aead_decrypt_aes_cbc,
3143 .givencrypt = _qcrypto_aead_givencrypt_aes_cbc,
3144 .geniv = "<built-in>",
3145 }
3146 }
3147 },
3148
3149#ifdef QCRYPTO_AEAD_AES_CTR
3150 {
3151 .cra_name = "authenc(hmac(sha1),ctr(aes))",
3152 .cra_driver_name = "qcrypto-aead-hmac-sha1-ctr-aes",
3153 .cra_priority = 300,
3154 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
3155 .cra_blocksize = AES_BLOCK_SIZE,
3156 .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
3157 .cra_alignmask = 0,
3158 .cra_type = &crypto_aead_type,
3159 .cra_module = THIS_MODULE,
3160 .cra_init = _qcrypto_cra_aead_init,
3161	.cra_exit = _qcrypto_cra_aead_exit,
3162	.cra_u = {
3163 .aead = {
3164 .ivsize = AES_BLOCK_SIZE,
3165 .maxauthsize = SHA1_DIGEST_SIZE,
3166 .setkey = _qcrypto_aead_setkey,
3167 .setauthsize = _qcrypto_aead_setauthsize,
3168 .encrypt = _qcrypto_aead_encrypt_aes_ctr,
3169 .decrypt = _qcrypto_aead_decrypt_aes_ctr,
3170 .givencrypt = _qcrypto_aead_givencrypt_aes_ctr,
3171 .geniv = "<built-in>",
3172 }
3173 }
3174 },
3175#endif /* QCRYPTO_AEAD_AES_CTR */
3176 {
3177 .cra_name = "authenc(hmac(sha1),cbc(des))",
3178 .cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-des",
3179 .cra_priority = 300,
3180 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
3181 .cra_blocksize = DES_BLOCK_SIZE,
3182 .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
3183 .cra_alignmask = 0,
3184 .cra_type = &crypto_aead_type,
3185 .cra_module = THIS_MODULE,
3186 .cra_init = _qcrypto_cra_aead_init,
3187	.cra_exit = _qcrypto_cra_aead_exit,
3188	.cra_u = {
3189 .aead = {
3190 .ivsize = DES_BLOCK_SIZE,
3191 .maxauthsize = SHA1_DIGEST_SIZE,
3192 .setkey = _qcrypto_aead_setkey,
3193 .setauthsize = _qcrypto_aead_setauthsize,
3194 .encrypt = _qcrypto_aead_encrypt_des_cbc,
3195 .decrypt = _qcrypto_aead_decrypt_des_cbc,
3196 .givencrypt = _qcrypto_aead_givencrypt_des_cbc,
3197 .geniv = "<built-in>",
3198 }
3199 }
3200 },
3201 {
3202 .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
3203 .cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-3des",
3204 .cra_priority = 300,
3205 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
3206 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3207 .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
3208 .cra_alignmask = 0,
3209 .cra_type = &crypto_aead_type,
3210 .cra_module = THIS_MODULE,
3211 .cra_init = _qcrypto_cra_aead_init,
3212	.cra_exit = _qcrypto_cra_aead_exit,
3213	.cra_u = {
3214 .aead = {
3215 .ivsize = DES3_EDE_BLOCK_SIZE,
3216 .maxauthsize = SHA1_DIGEST_SIZE,
3217 .setkey = _qcrypto_aead_setkey,
3218 .setauthsize = _qcrypto_aead_setauthsize,
3219 .encrypt = _qcrypto_aead_encrypt_3des_cbc,
3220 .decrypt = _qcrypto_aead_decrypt_3des_cbc,
3221 .givencrypt = _qcrypto_aead_givencrypt_3des_cbc,
3222 .geniv = "<built-in>",
3223 }
3224 }
3225 },
3226};
3227
3228static struct crypto_alg _qcrypto_aead_ccm_algo = {
3229 .cra_name = "ccm(aes)",
3230 .cra_driver_name = "qcrypto-aes-ccm",
3231 .cra_priority = 300,
3232 .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
3233 .cra_blocksize = AES_BLOCK_SIZE,
3234 .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx),
3235 .cra_alignmask = 0,
3236 .cra_type = &crypto_aead_type,
3237 .cra_module = THIS_MODULE,
3238 .cra_init = _qcrypto_cra_aead_init,
3239	.cra_exit = _qcrypto_cra_aead_exit,
3240	.cra_u = {
3241 .aead = {
3242 .ivsize = AES_BLOCK_SIZE,
3243 .maxauthsize = SHA1_DIGEST_SIZE,
3244 .setkey = _qcrypto_aead_ccm_setkey,
3245 .setauthsize = _qcrypto_aead_ccm_setauthsize,
3246 .encrypt = _qcrypto_aead_encrypt_aes_ccm,
3247 .decrypt = _qcrypto_aead_decrypt_aes_ccm,
3248 .geniv = "<built-in>",
3249 }
3250 }
3251};
3252
3253
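/*
 * Probe: open a qce handle, query the engine's capabilities, then
 * register only the cipher, hash and aead algorithms it supports.
 */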
3254static int _qcrypto_probe(struct platform_device *pdev)
3255{
3256 int rc = 0;
3257 void *handle;
3258 struct crypto_priv *cp;
3259 int i;
3260 struct msm_ce_hw_support *platform_support;
3261
3262 if (pdev->id >= MAX_CRYPTO_DEVICE) {
3263		pr_err("%s: device id %d exceeds allowed %d\n",
3264				__func__, pdev->id, MAX_CRYPTO_DEVICE);
3265 return -ENOENT;
3266 }
3267
3268 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
3269 if (!cp) {
3270 pr_err("qcrypto Memory allocation of q_alg FAIL, error %ld\n",
3271 PTR_ERR(cp));
3272 return -ENOMEM;
3273 }
3274
3275 /* open qce */
3276 handle = qce_open(pdev, &rc);
3277 if (handle == NULL) {
3278 kfree(cp);
3279 platform_set_drvdata(pdev, NULL);
3280 return rc;
3281 }
3282
3283 INIT_LIST_HEAD(&cp->alg_list);
3284 platform_set_drvdata(pdev, cp);
3285 spin_lock_init(&cp->lock);
3286 tasklet_init(&cp->done_tasklet, req_done, (unsigned long)cp);
3287 crypto_init_queue(&cp->queue, 50);
3288 cp->qce = handle;
3289 cp->pdev = pdev;
3290 qce_hw_support(cp->qce, &cp->ce_support);
3291	if (cp->ce_support.bam) {
3292 cp->platform_support.ce_shared = 0;
3293 cp->platform_support.shared_ce_resource = 0;
3294 cp->platform_support.hw_key_support = 0;
3295 cp->platform_support.bus_scale_table = NULL;
3296 cp->platform_support.sha_hmac = 1;
3297 } else {
3298 platform_support =
3299 (struct msm_ce_hw_support *)pdev->dev.platform_data;
3300 cp->platform_support.ce_shared = platform_support->ce_shared;
3301 cp->platform_support.shared_ce_resource =
3302				platform_support->shared_ce_resource;
3303		cp->platform_support.hw_key_support =
3304				platform_support->hw_key_support;
3305		cp->platform_support.bus_scale_table =
3306				platform_support->bus_scale_table;
3307		cp->platform_support.sha_hmac = platform_support->sha_hmac;
3308	}
3309	cp->high_bw_req_count = 0;
3310	cp->ce_lock_count = 0;
3311
3312
3313 if (cp->platform_support.ce_shared)
3314 INIT_WORK(&cp->unlock_ce_ws, qcrypto_unlock_ce);
3315
3316	if (cp->platform_support.bus_scale_table != NULL) {
3317 cp->bus_scale_handle =
3318 msm_bus_scale_register_client(
3319 (struct msm_bus_scale_pdata *)
3320 cp->platform_support.bus_scale_table);
3321 if (!cp->bus_scale_handle) {
3322 printk(KERN_ERR "%s not able to get bus scale\n",
3323 __func__);
3324 rc = -ENOMEM;
3325 goto err;
3326 }
3327 }
3328
3329	/* register crypto cipher algorithms the device supports */
3330 for (i = 0; i < ARRAY_SIZE(_qcrypto_ablk_cipher_algos); i++) {
3331 struct qcrypto_alg *q_alg;
3332
3333 q_alg = _qcrypto_cipher_alg_alloc(cp,
3334 &_qcrypto_ablk_cipher_algos[i]);
3335 if (IS_ERR(q_alg)) {
3336 rc = PTR_ERR(q_alg);
3337 goto err;
3338 }
3339 rc = crypto_register_alg(&q_alg->cipher_alg);
3340 if (rc) {
3341 dev_err(&pdev->dev, "%s alg registration failed\n",
3342 q_alg->cipher_alg.cra_driver_name);
3343 kfree(q_alg);
3344 } else {
3345 list_add_tail(&q_alg->entry, &cp->alg_list);
3346 dev_info(&pdev->dev, "%s\n",
3347 q_alg->cipher_alg.cra_driver_name);
3348 }
3349 }
3350
3351 /* register crypto cipher algorithms the device supports */
3352 if (cp->ce_support.aes_xts) {
3353 struct qcrypto_alg *q_alg;
3354
3355 q_alg = _qcrypto_cipher_alg_alloc(cp,
3356 &_qcrypto_ablk_cipher_xts_algo);
3357 if (IS_ERR(q_alg)) {
3358 rc = PTR_ERR(q_alg);
3359 goto err;
3360 }
3361 rc = crypto_register_alg(&q_alg->cipher_alg);
3362 if (rc) {
3363 dev_err(&pdev->dev, "%s alg registration failed\n",
3364 q_alg->cipher_alg.cra_driver_name);
3365 kfree(q_alg);
3366 } else {
3367 list_add_tail(&q_alg->entry, &cp->alg_list);
3368 dev_info(&pdev->dev, "%s\n",
3369 q_alg->cipher_alg.cra_driver_name);
3370 }
3371 }
3372
3373 /*
3374 * Register crypto hash (sha1 and sha256) algorithms the
3375 * device supports
3376 */
3377 for (i = 0; i < ARRAY_SIZE(_qcrypto_ahash_algos); i++) {
3378 struct qcrypto_alg *q_alg = NULL;
3379
3380 q_alg = _qcrypto_sha_alg_alloc(cp, &_qcrypto_ahash_algos[i]);
3381
3382 if (IS_ERR(q_alg)) {
3383 rc = PTR_ERR(q_alg);
3384 goto err;
3385 }
3386
3387 rc = crypto_register_ahash(&q_alg->sha_alg);
3388 if (rc) {
3389 dev_err(&pdev->dev, "%s alg registration failed\n",
3390 q_alg->sha_alg.halg.base.cra_driver_name);
3391 kfree(q_alg);
3392 } else {
3393 list_add_tail(&q_alg->entry, &cp->alg_list);
3394 dev_info(&pdev->dev, "%s\n",
3395 q_alg->sha_alg.halg.base.cra_driver_name);
3396 }
3397 }
3398
3399 /* register crypto aead (hmac-sha1) algorithms the device supports */
3400 if (cp->ce_support.sha1_hmac_20 || cp->ce_support.sha1_hmac) {
3401 for (i = 0; i < ARRAY_SIZE(_qcrypto_aead_sha1_hmac_algos);
3402 i++) {
3403 struct qcrypto_alg *q_alg;
3404
3405 q_alg = _qcrypto_cipher_alg_alloc(cp,
3406 &_qcrypto_aead_sha1_hmac_algos[i]);
3407 if (IS_ERR(q_alg)) {
3408 rc = PTR_ERR(q_alg);
3409 goto err;
3410 }
3411
3412 rc = crypto_register_alg(&q_alg->cipher_alg);
3413 if (rc) {
3414 dev_err(&pdev->dev,
3415 "%s alg registration failed\n",
3416 q_alg->cipher_alg.cra_driver_name);
3417 kfree(q_alg);
3418 } else {
3419 list_add_tail(&q_alg->entry, &cp->alg_list);
3420 dev_info(&pdev->dev, "%s\n",
3421 q_alg->cipher_alg.cra_driver_name);
3422 }
3423 }
3424 }
3425
3426 if ((cp->ce_support.sha_hmac) || (cp->platform_support.sha_hmac)) {
3427 /* register crypto hmac algorithms the device supports */
3428 for (i = 0; i < ARRAY_SIZE(_qcrypto_sha_hmac_algos); i++) {
3429 struct qcrypto_alg *q_alg = NULL;
3430
3431 q_alg = _qcrypto_sha_alg_alloc(cp,
3432 &_qcrypto_sha_hmac_algos[i]);
3433
3434 if (IS_ERR(q_alg)) {
3435 rc = PTR_ERR(q_alg);
3436 goto err;
3437 }
3438
3439 rc = crypto_register_ahash(&q_alg->sha_alg);
3440 if (rc) {
3441 dev_err(&pdev->dev,
3442 "%s alg registration failed\n",
3443 q_alg->sha_alg.halg.base.cra_driver_name);
3444 kfree(q_alg);
3445 } else {
3446 list_add_tail(&q_alg->entry, &cp->alg_list);
3447 dev_info(&pdev->dev, "%s\n",
3448 q_alg->sha_alg.halg.base.cra_driver_name);
3449 }
3450 }
3451 }
3452 /*
3453 * Register crypto cipher (aes-ccm) algorithms the
3454 * device supports
3455 */
3456 if (cp->ce_support.aes_ccm) {
3457 struct qcrypto_alg *q_alg;
3458
3459 q_alg = _qcrypto_cipher_alg_alloc(cp, &_qcrypto_aead_ccm_algo);
3460 if (IS_ERR(q_alg)) {
3461 rc = PTR_ERR(q_alg);
3462 goto err;
3463 }
3464 rc = crypto_register_alg(&q_alg->cipher_alg);
3465 if (rc) {
3466 dev_err(&pdev->dev, "%s alg registration failed\n",
3467 q_alg->cipher_alg.cra_driver_name);
3468 kfree(q_alg);
3469 } else {
3470 list_add_tail(&q_alg->entry, &cp->alg_list);
3471 dev_info(&pdev->dev, "%s\n",
3472 q_alg->cipher_alg.cra_driver_name);
3473 }
3474 }
3475
3476 return 0;
3477err:
3478 _qcrypto_remove(pdev);
3479 return rc;
3480};
3481
3482
3483static struct of_device_id qcrypto_match[] = {
3484 { .compatible = "qcom,qcrypto",
3485 },
3486 {}
3487};
3488
3489static struct platform_driver _qualcomm_crypto = {
3490 .probe = _qcrypto_probe,
3491 .remove = _qcrypto_remove,
3492 .driver = {
3493 .owner = THIS_MODULE,
3494 .name = "qcrypto",
3495		.of_match_table = qcrypto_match,
3496	},
3497};
3498
3499static int _debug_qcrypto[MAX_CRYPTO_DEVICE];
3500
3501static int _debug_stats_open(struct inode *inode, struct file *file)
3502{
3503 file->private_data = inode->i_private;
3504 return 0;
3505}
3506
3507static ssize_t _debug_stats_read(struct file *file, char __user *buf,
3508 size_t count, loff_t *ppos)
3509{
3510 int rc = -EINVAL;
3511 int qcrypto = *((int *) file->private_data);
3512 int len;
3513
3514 len = _disp_stats(qcrypto);
3515
3516	rc = simple_read_from_buffer((void __user *) buf, count,
3517 ppos, (void *) _debug_read_buf, len);
3518
3519 return rc;
3520}
3521
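/* Writing anything to a stats file resets that device's counters */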
3522static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
3523 size_t count, loff_t *ppos)
3524{
3525
3526 int qcrypto = *((int *) file->private_data);
3527
3528 memset((char *)&_qcrypto_stat[qcrypto], 0, sizeof(struct crypto_stat));
3529 return count;
3530};
3531
3532static const struct file_operations _debug_stats_ops = {
3533 .open = _debug_stats_open,
3534 .read = _debug_stats_read,
3535 .write = _debug_stats_write,
3536};
3537
3538static int _qcrypto_debug_init(void)
3539{
3540 int rc;
3541 char name[DEBUG_MAX_FNAME];
3542 int i;
3543 struct dentry *dent;
3544
3545 _debug_dent = debugfs_create_dir("qcrypto", NULL);
3546 if (IS_ERR(_debug_dent)) {
3547 pr_err("qcrypto debugfs_create_dir fail, error %ld\n",
3548 PTR_ERR(_debug_dent));
3549 return PTR_ERR(_debug_dent);
3550 }
3551
3552 for (i = 0; i < MAX_CRYPTO_DEVICE; i++) {
3553 snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", i+1);
3554 _debug_qcrypto[i] = i;
3555 dent = debugfs_create_file(name, 0644, _debug_dent,
3556 &_debug_qcrypto[i], &_debug_stats_ops);
3557 if (dent == NULL) {
3558			pr_err("qcrypto debugfs_create_file fail for %s\n",
3559					name);
3560			rc = -ENOMEM;
3561 goto err;
3562 }
3563 }
3564 return 0;
3565err:
3566 debugfs_remove_recursive(_debug_dent);
3567 return rc;
3568}
3569
3570static int __init _qcrypto_init(void)
3571{
3572 int rc;
3573
3574 rc = _qcrypto_debug_init();
3575 if (rc)
3576 return rc;
3577
3578 return platform_driver_register(&_qualcomm_crypto);
3579}
3580
3581static void __exit _qcrypto_exit(void)
3582{
3583	pr_debug("%s Unregister QCRYPTO\n", __func__);
3584	debugfs_remove_recursive(_debug_dent);
3585 platform_driver_unregister(&_qualcomm_crypto);
3586}
3587
3588module_init(_qcrypto_init);
3589module_exit(_qcrypto_exit);
3590
3591MODULE_LICENSE("GPL v2");
3592MODULE_DESCRIPTION("Qualcomm Crypto driver");