/* Qualcomm Crypto Engine driver.
 *
 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) "QCE50: %s: " fmt, __func__

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/qcedev.h>
#include <linux/bitops.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <mach/dma.h>
#include <mach/clk.h>
#include <mach/socinfo.h>

#include "qce.h"
#include "qce50.h"
#include "qcryptohw_50.h"

#define CRYPTO_CONFIG_RESET 0xE001F

static DEFINE_MUTEX(bam_register_cnt);
struct bam_registration_info {
	uint32_t handle;
	uint32_t cnt;
};
static struct bam_registration_info bam_registry;

/*
 * CE HW device structure.
 * Each engine has an instance of the structure.
 * Each engine can only handle one crypto operation at a time. It is up to
 * the SW above to ensure single-threaded operation on an engine.
 */
struct qce_device {
	struct device *pdev;		/* Handle to platform_device structure */

	unsigned char *coh_vmem;	/* Allocated coherent virtual memory */
	dma_addr_t coh_pmem;		/* Allocated coherent physical memory */
	int memsize;			/* Memory allocated */

	void __iomem *iobase;		/* Virtual io base of CE HW */
	unsigned int phy_iobase;	/* Physical io base of CE HW */

	struct clk *ce_core_src_clk;	/* Handle to CE src clk */
	struct clk *ce_core_clk;	/* Handle to CE clk */
	struct clk *ce_clk;		/* Handle to CE clk */

	qce_comp_func_ptr_t qce_cb;	/* qce callback function pointer */

	int assoc_nents;
	int ivsize;
	int authsize;
	int src_nents;
	int dst_nents;

	dma_addr_t phy_iv_in;

	void *areq;
	enum qce_cipher_mode_enum mode;
	struct ce_sps_data ce_sps;
};

/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
static uint32_t _std_init_vector_sha1[] = {
	0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
};

/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint32_t _std_init_vector_sha256[] = {
	0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
	0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
};

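/*
 * Pack a byte stream into an array of 32-bit words in network (big-endian)
 * order, e.g. bytes {0x01, 0x02, 0x03, 0x04} become the word 0x01020304.
 * A 1-, 2-, or 3-byte tail is left-justified in the final word.
 */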
static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned n;

	n = len / sizeof(uint32_t);
	for (; n > 0; n--) {
		*iv = ((*b << 24) & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000) |
				(((*(b+2)) << 8) & 0xff00) |
				(*(b+3) & 0xff);
		b += sizeof(uint32_t);
		iv++;
	}

	n = len % sizeof(uint32_t);
	if (n == 3) {
		*iv = ((*b << 24) & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000) |
				(((*(b+2)) << 8) & 0xff00);
	} else if (n == 2) {
		*iv = ((*b << 24) & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000);
	} else if (n == 1) {
		*iv = ((*b << 24) & 0xff000000);
	}
}

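/*
 * Byte-reverse an IV of up to AES_IV_LENGTH bytes (zero-padding short IVs
 * at the front), then pack it into network-order words.
 */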
static void _byte_stream_swap_to_net_words(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned i, j;
	unsigned char swap_iv[AES_IV_LENGTH];

	memset(swap_iv, 0, AES_IV_LENGTH);
	for (i = (AES_IV_LENGTH-len), j = len-1; i < AES_IV_LENGTH; i++, j--)
		swap_iv[i] = b[j];
	_byte_stream_to_net_words(iv, swap_iv, AES_IV_LENGTH);
}

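/* Count how many scatterlist entries are needed to cover nbytes of data. */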
static int count_sg(struct scatterlist *sg, int nbytes)
{
	int i;

	for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
		nbytes -= sg->length;
	return i;
}

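/*
 * Read and validate the crypto core version register. Only major revision
 * 5 is accepted, i.e. 5.0.0 through 5.0.2, 5.1.0 and 5.2.0 (for minor
 * revisions above 0 the step revision must be 0).
 */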
static int _probe_ce_engine(struct qce_device *pce_dev)
{
	unsigned int rev;
	unsigned int maj_rev, min_rev, step_rev;

	rev = readl_relaxed(pce_dev->iobase + CRYPTO_VERSION_REG);
	mb();
	maj_rev = (rev & CRYPTO_CORE_MAJOR_REV_MASK) >> CRYPTO_CORE_MAJOR_REV;
	min_rev = (rev & CRYPTO_CORE_MINOR_REV_MASK) >> CRYPTO_CORE_MINOR_REV;
	step_rev = (rev & CRYPTO_CORE_STEP_REV_MASK) >> CRYPTO_CORE_STEP_REV;

	if ((maj_rev != 0x05) || (min_rev > 0x02) || (step_rev > 0x02)) {
		pr_err("Unknown Qualcomm crypto device at 0x%x, rev %d.%d.%d\n",
			pce_dev->phy_iobase, maj_rev, min_rev, step_rev);
		return -EIO;
	}
	if ((min_rev > 0) && (step_rev != 0)) {
		pr_err("Unknown Qualcomm crypto device at 0x%x, rev %d.%d.%d\n",
			pce_dev->phy_iobase, maj_rev, min_rev, step_rev);
		return -EIO;
	}
	pce_dev->ce_sps.minor_version = min_rev;

	dev_info(pce_dev->pdev, "Qualcomm Crypto %d.%d.%d device found @0x%x\n",
			maj_rev, min_rev, step_rev, pce_dev->phy_iobase);

	pce_dev->ce_sps.ce_burst_size = MAX_CE_BAM_BURST_SIZE;

	dev_info(pce_dev->pdev,
			"IO base, CE = 0x%x\n"
			"Consumer (IN) PIPE %d, "
			"Producer (OUT) PIPE %d\n"
			"IO base BAM = 0x%x\n"
			"BAM IRQ %d\n",
			(uint32_t) pce_dev->iobase,
			pce_dev->ce_sps.dest_pipe_index,
			pce_dev->ce_sps.src_pipe_index,
			(uint32_t)pce_dev->ce_sps.bam_iobase,
			pce_dev->ce_sps.bam_irq);
	return 0;
}

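/*
 * Select the pre-built command list matching the requested hash
 * algorithm and, for AES-CMAC, the key size.
 */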
static int _ce_get_hash_cmdlistinfo(struct qce_device *pce_dev,
				struct qce_sha_req *sreq,
				struct qce_cmdlist_info **cmdplistinfo)
{
	struct qce_cmdlistptr_ops *cmdlistptr = &pce_dev->ce_sps.cmdlistptr;

	switch (sreq->alg) {
	case QCE_HASH_SHA1:
		*cmdplistinfo = &cmdlistptr->auth_sha1;
		break;

	case QCE_HASH_SHA256:
		*cmdplistinfo = &cmdlistptr->auth_sha256;
		break;

	case QCE_HASH_SHA1_HMAC:
		*cmdplistinfo = &cmdlistptr->auth_sha1_hmac;
		break;

	case QCE_HASH_SHA256_HMAC:
		*cmdplistinfo = &cmdlistptr->auth_sha256_hmac;
		break;

	case QCE_HASH_AES_CMAC:
		if (sreq->authklen == AES128_KEY_SIZE)
			*cmdplistinfo = &cmdlistptr->auth_aes_128_cmac;
		else
			*cmdplistinfo = &cmdlistptr->auth_aes_256_cmac;
		break;

	default:
		break;
	}
	return 0;
}

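/*
 * Patch a pre-built hash command list with the parameters of one request:
 * HMAC/CMAC key (or the OEM HW key when the supplied key is all zero),
 * initial digest or standard IV, byte counts, first/last block flags and
 * segment sizes.
 */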
static int _ce_setup_hash(struct qce_device *pce_dev,
				struct qce_sha_req *sreq,
				struct qce_cmdlist_info *cmdlistinfo)
{
	uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
	uint32_t diglen;
	int i;
	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	bool sha1 = false;
	struct sps_command_element *pce = NULL;

	if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
			(sreq->alg == QCE_HASH_SHA256_HMAC) ||
			(sreq->alg == QCE_HASH_AES_CMAC)) {
		uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);

		_byte_stream_to_net_words(mackey32, sreq->authkey,
						sreq->authklen);

		/* check for null key. If null, use hw key */
		for (i = 0; i < authk_size_in_word; i++) {
			if (mackey32[i] != 0)
				break;
		}

		pce = cmdlistinfo->go_proc;
		if (i == authk_size_in_word) {
			pce->addr = (uint32_t)(CRYPTO_GOPROC_OEM_KEY_REG +
							pce_dev->phy_iobase);
		} else {
			pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
							pce_dev->phy_iobase);
			pce = cmdlistinfo->auth_key;
			for (i = 0; i < authk_size_in_word; i++, pce++)
				pce->data = mackey32[i];
		}
	}

	if (sreq->alg == QCE_HASH_AES_CMAC)
		goto go_proc;

	/* if not the last, the size has to be on the block boundary */
	if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
		return -EIO;

	switch (sreq->alg) {
	case QCE_HASH_SHA1:
	case QCE_HASH_SHA1_HMAC:
		diglen = SHA1_DIGEST_SIZE;
		sha1 = true;
		break;
	case QCE_HASH_SHA256:
	case QCE_HASH_SHA256_HMAC:
		diglen = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	/* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
	if (sreq->first_blk) {
		if (sha1) {
			for (i = 0; i < 5; i++)
				auth32[i] = _std_init_vector_sha1[i];
		} else {
			for (i = 0; i < 8; i++)
				auth32[i] = _std_init_vector_sha256[i];
		}
	} else {
		_byte_stream_to_net_words(auth32, sreq->digest, diglen);
	}

	pce = cmdlistinfo->auth_iv;
	for (i = 0; i < 5; i++, pce++)
		pce->data = auth32[i];

	if ((sreq->alg == QCE_HASH_SHA256) ||
			(sreq->alg == QCE_HASH_SHA256_HMAC)) {
		for (i = 5; i < 8; i++, pce++)
			pce->data = auth32[i];
	}

	/* write auth_bytecnt 0/1, start with 0 */
	pce = cmdlistinfo->auth_bytecount;
	for (i = 0; i < 2; i++, pce++)
		pce->data = sreq->auth_data[i];

	/* Set/reset last and first bits in CFG register */
	pce = cmdlistinfo->auth_seg_cfg;
	if (sreq->last_blk)
		pce->data |= 1 << CRYPTO_LAST;
	else
		pce->data &= ~(1 << CRYPTO_LAST);
	if (sreq->first_blk)
		pce->data |= 1 << CRYPTO_FIRST;
	else
		pce->data &= ~(1 << CRYPTO_FIRST);
go_proc:
	/* write auth seg size */
	pce = cmdlistinfo->auth_seg_size;
	pce->data = sreq->size;

	/* write auth seg start */
	pce = cmdlistinfo->auth_seg_start;
	pce->data = 0;

	/* write seg size */
	pce = cmdlistinfo->seg_size;
	pce->data = sreq->size;

	return 0;
}

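/*
 * Select the pre-built command list matching the requested cipher
 * algorithm, mode and key size.
 */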
static int _ce_get_cipher_cmdlistinfo(struct qce_device *pce_dev,
				struct qce_req *creq,
				struct qce_cmdlist_info **cmdlistinfo)
{
	struct qce_cmdlistptr_ops *cmdlistptr = &pce_dev->ce_sps.cmdlistptr;

	if (creq->alg != CIPHER_ALG_AES) {
		switch (creq->alg) {
		case CIPHER_ALG_DES:
			if (creq->mode == QCE_MODE_ECB)
				*cmdlistinfo = &cmdlistptr->cipher_des_ecb;
			else
				*cmdlistinfo = &cmdlistptr->cipher_des_cbc;
			break;

		case CIPHER_ALG_3DES:
			if (creq->mode == QCE_MODE_ECB)
				*cmdlistinfo =
					&cmdlistptr->cipher_3des_ecb;
			else
				*cmdlistinfo =
					&cmdlistptr->cipher_3des_cbc;
			break;
		default:
			break;
		}
	} else {
		switch (creq->mode) {
		case QCE_MODE_ECB:
			if (creq->encklen == AES128_KEY_SIZE)
				*cmdlistinfo = &cmdlistptr->cipher_aes_128_ecb;
			else
				*cmdlistinfo = &cmdlistptr->cipher_aes_256_ecb;
			break;

		case QCE_MODE_CBC:
		case QCE_MODE_CTR:
			if (creq->encklen == AES128_KEY_SIZE)
				*cmdlistinfo =
					&cmdlistptr->cipher_aes_128_cbc_ctr;
			else
				*cmdlistinfo =
					&cmdlistptr->cipher_aes_256_cbc_ctr;
			break;

		case QCE_MODE_XTS:
			if (creq->encklen == AES128_KEY_SIZE)
				*cmdlistinfo = &cmdlistptr->cipher_aes_128_xts;
			else
				*cmdlistinfo = &cmdlistptr->cipher_aes_256_xts;
			break;

		case QCE_MODE_CCM:
			if (creq->encklen == AES128_KEY_SIZE)
				*cmdlistinfo = &cmdlistptr->aead_aes_128_ccm;
			else
				*cmdlistinfo = &cmdlistptr->aead_aes_256_ccm;
			break;

		default:
			break;
		}
	}
	return 0;
}

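/*
 * Patch a pre-built cipher command list with the parameters of one
 * request: encryption key (or the OEM HW key when the supplied key is
 * all zero), IV/counter, XTS tweak key and DU size, CCM nonce and auth
 * config, direction and segment sizes.
 */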
static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
		uint32_t totallen_in, uint32_t coffset,
		struct qce_cmdlist_info *cmdlistinfo)
{
	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
			0, 0, 0, 0};
	uint32_t enck_size_in_word = 0;
	uint32_t key_size;
	bool use_hw_key = false;
	uint32_t encr_cfg = 0;
	uint32_t ivsize = creq->ivsize;
	int i;
	struct sps_command_element *pce = NULL;

	if (creq->mode == QCE_MODE_XTS)
		key_size = creq->encklen/2;
	else
		key_size = creq->encklen;

	_byte_stream_to_net_words(enckey32, creq->enckey, key_size);

	/* check for null key. If null, use hw key */
	enck_size_in_word = key_size/sizeof(uint32_t);
	for (i = 0; i < enck_size_in_word; i++) {
		if (enckey32[i] != 0)
			break;
	}
	pce = cmdlistinfo->go_proc;
	if (i == enck_size_in_word) {
		use_hw_key = true;
		pce->addr = (uint32_t)(CRYPTO_GOPROC_OEM_KEY_REG +
						pce_dev->phy_iobase);
	} else {
		pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
						pce_dev->phy_iobase);
	}

	if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
		uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
		uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
		uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
		uint32_t auth_cfg = 0;

		/* write nonce */
		_byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
		pce = cmdlistinfo->auth_nonce_info;
		for (i = 0; i < noncelen32; i++, pce++)
			pce->data = nonce32[i];

		/* TBD NEW FEATURE partial AES CCM pkt support: set last bit */
		auth_cfg |= ((1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST));
		if (creq->dir == QCE_ENCRYPT)
			auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
		else
			auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
		auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE);
		auth_cfg |= (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE);
		if (creq->authklen == AES128_KEY_SIZE)
			auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES128 <<
						CRYPTO_AUTH_KEY_SIZE);
		else {
			if (creq->authklen == AES256_KEY_SIZE)
				auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES256 <<
							CRYPTO_AUTH_KEY_SIZE);
		}
		auth_cfg |= (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG);
		auth_cfg |= ((MAX_NONCE/sizeof(uint32_t)) <<
						CRYPTO_AUTH_NONCE_NUM_WORDS);

		if (use_hw_key == true) {
			auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH);
		} else {
			auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
			/* write auth key */
			pce = cmdlistinfo->auth_key;
			for (i = 0; i < authklen32; i++, pce++)
				pce->data = enckey32[i];
		}

		pce = cmdlistinfo->auth_seg_cfg;
		pce->data = auth_cfg;

		pce = cmdlistinfo->auth_seg_size;
		pce->data = totallen_in;
		pce = cmdlistinfo->auth_seg_start;
		pce->data = 0;
	}

	switch (creq->mode) {
	case QCE_MODE_ECB:
		encr_cfg |= (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
		break;
	case QCE_MODE_CBC:
		encr_cfg |= (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
		break;
	case QCE_MODE_XTS:
		encr_cfg |= (CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
		break;
	case QCE_MODE_CCM:
		encr_cfg |= (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE);
		break;
	case QCE_MODE_CTR:
	default:
		encr_cfg |= (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
		break;
	}
	pce_dev->mode = creq->mode;

	switch (creq->alg) {
	case CIPHER_ALG_DES:
		if (creq->mode != QCE_MODE_ECB) {
			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
			pce = cmdlistinfo->encr_cntr_iv;
			pce->data = enciv32[0];
			pce++;
			pce->data = enciv32[1];
		}
		if (use_hw_key == false) {
			pce = cmdlistinfo->encr_key;
			pce->data = enckey32[0];
			pce++;
			pce->data = enckey32[1];
		}
		break;
	case CIPHER_ALG_3DES:
		if (creq->mode != QCE_MODE_ECB) {
			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
			pce = cmdlistinfo->encr_cntr_iv;
			pce->data = enciv32[0];
			pce++;
			pce->data = enciv32[1];
		}
		if (use_hw_key == false) {
			/* write encr key */
			pce = cmdlistinfo->encr_key;
			for (i = 0; i < 6; i++, pce++)
				pce->data = enckey32[i];
		}
		break;
	case CIPHER_ALG_AES:
	default:
		if (creq->mode == QCE_MODE_XTS) {
			uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
					= {0, 0, 0, 0, 0, 0, 0, 0};
			uint32_t xtsklen =
					creq->encklen/(2 * sizeof(uint32_t));

			_byte_stream_to_net_words(xtskey32, (creq->enckey +
					creq->encklen/2), creq->encklen/2);
			/* write xts encr key */
			pce = cmdlistinfo->encr_xts_key;
			for (i = 0; i < xtsklen; i++, pce++)
				pce->data = xtskey32[i];

			/* write xts du size */
			pce = cmdlistinfo->encr_xts_du_size;
			pce->data = creq->cryptlen;
		}
		if (creq->mode != QCE_MODE_ECB) {
			if (creq->mode == QCE_MODE_XTS)
				_byte_stream_swap_to_net_words(enciv32,
							creq->iv, ivsize);
			else
				_byte_stream_to_net_words(enciv32, creq->iv,
								ivsize);
			/* write encr cntr iv */
			pce = cmdlistinfo->encr_cntr_iv;
			for (i = 0; i < 4; i++, pce++)
				pce->data = enciv32[i];

			if (creq->mode == QCE_MODE_CCM) {
				/* write cntr iv for ccm */
				pce = cmdlistinfo->encr_ccm_cntr_iv;
				for (i = 0; i < 4; i++, pce++)
					pce->data = enciv32[i];
				/* update cntr_iv[3] by one */
				pce = cmdlistinfo->encr_cntr_iv;
				pce += 3;
				pce->data += 1;
			}
		}

		if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
			encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
					CRYPTO_ENCR_KEY_SZ);
			encr_cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
		} else {
			if (use_hw_key == false) {
				/* write encr key */
				pce = cmdlistinfo->encr_key;
				for (i = 0; i < enck_size_in_word; i++, pce++)
					pce->data = enckey32[i];
				switch (key_size) {
				case AES128_KEY_SIZE:
					encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128
							<< CRYPTO_ENCR_KEY_SZ);
					break;
				case AES256_KEY_SIZE:
				default:
					encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES256
							<< CRYPTO_ENCR_KEY_SZ);
					break;
				} /* end of switch (key_size) */
			}
			encr_cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
		} /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
		break;
	} /* end of switch (creq->alg) */

	/* write encr seg cfg */
	pce = cmdlistinfo->encr_seg_cfg;
	if ((creq->alg == CIPHER_ALG_DES) || (creq->alg == CIPHER_ALG_3DES)) {
		if (creq->dir == QCE_ENCRYPT)
			pce->data |= (1 << CRYPTO_ENCODE);
		else
			pce->data &= ~(1 << CRYPTO_ENCODE);
		encr_cfg = pce->data;
	} else {
		encr_cfg |=
			((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
	}
	if (use_hw_key == true)
		encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
	else
		encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
	pce->data = encr_cfg;

	/* write encr seg size */
	pce = cmdlistinfo->encr_seg_size;
	if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT))
		pce->data = (creq->cryptlen + creq->authsize);
	else
		pce->data = creq->cryptlen;

	/* write encr seg start */
	pce = cmdlistinfo->encr_seg_start;
	pce->data = (coffset & 0xffff);

	/* write seg size */
	pce = cmdlistinfo->seg_size;
	pce->data = totallen_in;

	return 0;
}

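/*
 * Completion handler for an AEAD request: unmap the DMA buffers, pick up
 * the computed MAC (and, for non-CCM modes, the resulting counter IV) from
 * the result dump region and hand them to the registered callback.
 */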
static int _aead_complete(struct qce_device *pce_dev)
{
	struct aead_request *areq;
	unsigned char mac[SHA256_DIGEST_SIZE];

	areq = (struct aead_request *) pce_dev->areq;
	if (areq->src != areq->dst) {
		dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
					DMA_FROM_DEVICE);
	}
	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
							DMA_TO_DEVICE);
	dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
			DMA_TO_DEVICE);
	/* check MAC */
	memcpy(mac, (char *)(&pce_dev->ce_sps.result->auth_iv[0]),
						SHA256_DIGEST_SIZE);
	if (pce_dev->mode == QCE_MODE_CCM) {
		uint32_t result_status;
		result_status = pce_dev->ce_sps.result->status;
		result_status &= (1 << CRYPTO_MAC_FAILED);
		result_status |= (pce_dev->ce_sps.consumer_status |
					pce_dev->ce_sps.producer_status);
		pce_dev->qce_cb(areq, mac, NULL, result_status);
	} else {
		uint32_t ivsize = 0;
		struct crypto_aead *aead;
		unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];

		aead = crypto_aead_reqtfm(areq);
		ivsize = crypto_aead_ivsize(aead);
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
				ivsize, DMA_TO_DEVICE);
		memcpy(iv, (char *)(pce_dev->ce_sps.result->encr_cntr_iv),
			sizeof(iv));
		pce_dev->qce_cb(areq, mac, iv,
				pce_dev->ce_sps.consumer_status |
				pce_dev->ce_sps.producer_status);
	}
	return 0;
}

static void _sha_complete(struct qce_device *pce_dev)
{
	struct ahash_request *areq;
	unsigned char digest[SHA256_DIGEST_SIZE];

	areq = (struct ahash_request *) pce_dev->areq;
	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
				DMA_TO_DEVICE);
	memcpy(digest, (char *)(&pce_dev->ce_sps.result->auth_iv[0]),
						SHA256_DIGEST_SIZE);
	pce_dev->qce_cb(areq, digest,
			(char *)pce_dev->ce_sps.result->auth_byte_count,
			pce_dev->ce_sps.consumer_status);
}

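/*
 * Completion handler for an ablkcipher request: unmap the DMA buffers and
 * return the updated IV to the callback. On core minor version 0 the
 * result dump does not carry the counter IV, so for CBC it is copied from
 * the source buffer, and for CTR/XTS the 16-bit counter tail of the IV is
 * advanced in software.
 */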
static int _ablk_cipher_complete(struct qce_device *pce_dev)
{
	struct ablkcipher_request *areq;
	unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];

	areq = (struct ablkcipher_request *) pce_dev->areq;

	if (areq->src != areq->dst) {
		dma_unmap_sg(pce_dev->pdev, areq->dst,
			pce_dev->dst_nents, DMA_FROM_DEVICE);
	}
	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
		(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
						DMA_TO_DEVICE);

	if (pce_dev->mode == QCE_MODE_ECB) {
		pce_dev->qce_cb(areq, NULL, NULL,
					pce_dev->ce_sps.consumer_status |
					pce_dev->ce_sps.producer_status);
	} else {
		if (pce_dev->ce_sps.minor_version == 0) {
			if (pce_dev->mode == QCE_MODE_CBC)
				memcpy(iv, (char *)sg_virt(areq->src),
							sizeof(iv));

			if ((pce_dev->mode == QCE_MODE_CTR) ||
				(pce_dev->mode == QCE_MODE_XTS)) {
				uint32_t num_blk = 0;
				uint32_t cntr_iv = 0;

				memcpy(iv, areq->info, sizeof(iv));
				if (pce_dev->mode == QCE_MODE_CTR)
					num_blk = areq->nbytes/16;
				cntr_iv = (u32)(((u32)(*(iv + 14))) << 8) |
						(u32)(*(iv + 15));
				*(iv + 14) = (char)((cntr_iv + num_blk) >> 8);
				*(iv + 15) = (char)((cntr_iv + num_blk) & 0xFF);
			}
		} else {
			memcpy(iv,
				(char *)(pce_dev->ce_sps.result->encr_cntr_iv),
				sizeof(iv));
		}
		pce_dev->qce_cb(areq, NULL, iv,
				pce_dev->ce_sps.consumer_status |
				pce_dev->ce_sps.producer_status);
	}
	return 0;
}

#ifdef QCE_DEBUG
static void _qce_dump_descr_fifos(struct qce_device *pce_dev)
{
	int i, j, ents;
	struct sps_iovec *iovec = pce_dev->ce_sps.in_transfer.iovec;
	uint32_t cmd_flags = SPS_IOVEC_FLAG_CMD | SPS_IOVEC_FLAG_NWD;

	printk(KERN_INFO "==============================================\n");
	printk(KERN_INFO "CONSUMER (TX/IN/DEST) PIPE DESCRIPTOR\n");
	printk(KERN_INFO "==============================================\n");
	for (i = 0; i < pce_dev->ce_sps.in_transfer.iovec_count; i++) {
		printk(KERN_INFO " [%d] addr=0x%x size=0x%x flags=0x%x\n", i,
				iovec->addr, iovec->size, iovec->flags);
		if (iovec->flags & cmd_flags) {
			struct sps_command_element *pced;

			pced = (struct sps_command_element *)
					(GET_VIRT_ADDR(iovec->addr));
			ents = iovec->size/(sizeof(struct sps_command_element));
			for (j = 0; j < ents; j++) {
				printk(KERN_INFO "  [%d] [0x%x] 0x%x\n", j,
					pced->addr, pced->data);
				pced++;
			}
		}
		iovec++;
	}

	printk(KERN_INFO "==============================================\n");
	printk(KERN_INFO "PRODUCER (RX/OUT/SRC) PIPE DESCRIPTOR\n");
	printk(KERN_INFO "==============================================\n");
	iovec = pce_dev->ce_sps.out_transfer.iovec;
	for (i = 0; i < pce_dev->ce_sps.out_transfer.iovec_count; i++) {
		printk(KERN_INFO " [%d] addr=0x%x size=0x%x flags=0x%x\n", i,
				iovec->addr, iovec->size, iovec->flags);
		iovec++;
	}
}

#else
static void _qce_dump_descr_fifos(struct qce_device *pce_dev)
{
}
#endif

static void _qce_sps_iovec_count_init(struct qce_device *pce_dev)
{
	pce_dev->ce_sps.in_transfer.iovec_count = 0;
	pce_dev->ce_sps.out_transfer.iovec_count = 0;
}

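/* Mark the last queued iovec of a pipe as end-of-transfer. */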
static void _qce_set_eot_flag(struct sps_transfer *sps_bam_pipe)
{
	struct sps_iovec *iovec = sps_bam_pipe->iovec +
			(sps_bam_pipe->iovec_count - 1);
	iovec->flags |= SPS_IOVEC_FLAG_EOT;
}

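/* Queue a single data buffer on a pipe; zero-length buffers are skipped. */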
static void _qce_sps_add_data(uint32_t addr, uint32_t len,
		struct sps_transfer *sps_bam_pipe)
{
	struct sps_iovec *iovec = sps_bam_pipe->iovec +
			sps_bam_pipe->iovec_count;
	if (len) {
		iovec->size = len;
		iovec->addr = addr;
		iovec->flags = 0;
		sps_bam_pipe->iovec_count++;
	}
}

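/*
 * Queue a scatterlist on a pipe, splitting entries larger than
 * SPS_MAX_PKT_SIZE into multiple iovecs. On core minor version 0 each
 * entry's length is padded up to a burst-size multiple.
 */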
static int _qce_sps_add_sg_data(struct qce_device *pce_dev,
		struct scatterlist *sg_src, uint32_t nbytes,
		struct sps_transfer *sps_bam_pipe)
{
	uint32_t addr, data_cnt, len;
	struct sps_iovec *iovec = sps_bam_pipe->iovec +
						sps_bam_pipe->iovec_count;

	while (nbytes > 0) {
		len = min(nbytes, sg_dma_len(sg_src));
		nbytes -= len;
		addr = sg_dma_address(sg_src);
		if (pce_dev->ce_sps.minor_version == 0)
			len = ALIGN(len, pce_dev->ce_sps.ce_burst_size);
		while (len > 0) {
			if (len > SPS_MAX_PKT_SIZE)
				data_cnt = SPS_MAX_PKT_SIZE;
			else
				data_cnt = len;
			iovec->size = data_cnt;
			iovec->addr = addr;
			iovec->flags = 0;
			iovec++;
			sps_bam_pipe->iovec_count++;
			addr += data_cnt;
			len -= data_cnt;
		}
		sg_src++;
	}
	return 0;
}

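/* Queue a command-list descriptor, with the CMD and NWD flags set, on a pipe. */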
static int _qce_sps_add_cmd(struct qce_device *pce_dev, uint32_t flag,
		struct qce_cmdlist_info *cmdptr,
		struct sps_transfer *sps_bam_pipe)
{
	struct sps_iovec *iovec = sps_bam_pipe->iovec +
					sps_bam_pipe->iovec_count;
	iovec->size = cmdptr->size;
	iovec->addr = GET_PHYS_ADDR(cmdptr->cmdlist);
	iovec->flags = SPS_IOVEC_FLAG_CMD | SPS_IOVEC_FLAG_NWD | flag;
	sps_bam_pipe->iovec_count++;

	return 0;
}

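/*
 * Submit the queued consumer (IN) and producer (OUT) descriptors to the
 * SPS driver, kicking off the transfer.
 */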
static int _qce_sps_transfer(struct qce_device *pce_dev)
{
	int rc = 0;

	_qce_dump_descr_fifos(pce_dev);
	rc = sps_transfer(pce_dev->ce_sps.consumer.pipe,
					&pce_dev->ce_sps.in_transfer);
	if (rc) {
		pr_err("sps_xfr() fail (consumer pipe=0x%x) rc = %d\n",
				(u32)pce_dev->ce_sps.consumer.pipe, rc);
		return rc;
	}
	rc = sps_transfer(pce_dev->ce_sps.producer.pipe,
					&pce_dev->ce_sps.out_transfer);
	if (rc) {
		pr_err("sps_xfr() fail (producer pipe=0x%x) rc = %d\n",
				(u32)pce_dev->ce_sps.producer.pipe, rc);
		return rc;
	}
	return rc;
}

/**
 * Allocate and connect a CE peripheral's SPS endpoint
 *
 * This function allocates an endpoint context and
 * connects it with a memory endpoint by calling
 * the appropriate SPS driver APIs.
 *
 * It also registers an SPS callback function with
 * the SPS driver.
 *
 * This function should only be called once, typically
 * during driver probe.
 *
 * @pce_dev - Pointer to qce_device structure
 * @ep - Pointer to sps endpoint data structure
 * @is_producer - 1 means producer endpoint
 *		  0 means consumer endpoint
 *
 * @return - 0 if successful else negative value.
 *
 */
static int qce_sps_init_ep_conn(struct qce_device *pce_dev,
				struct qce_sps_ep_conn_data *ep,
				bool is_producer)
{
	int rc = 0;
	struct sps_pipe *sps_pipe_info;
	struct sps_connect *sps_connect_info = &ep->connect;
	struct sps_register_event *sps_event = &ep->event;

	/* Allocate endpoint context */
	sps_pipe_info = sps_alloc_endpoint();
	if (!sps_pipe_info) {
		pr_err("sps_alloc_endpoint() failed!!! is_producer=%d",
			is_producer);
		rc = -ENOMEM;
		goto out;
	}
	/* Now save the sps pipe handle */
	ep->pipe = sps_pipe_info;

	/* Get default connection configuration for an endpoint */
	rc = sps_get_config(sps_pipe_info, sps_connect_info);
	if (rc) {
		pr_err("sps_get_config() fail pipe_handle=0x%x, rc = %d\n",
			(u32)sps_pipe_info, rc);
		goto get_config_err;
	}

	/* Modify the default connection configuration */
	if (is_producer) {
		/*
		 * For a CE producer transfer, the source should be
		 * the CE peripheral, whereas the destination should
		 * be system memory.
		 */
		sps_connect_info->source = pce_dev->ce_sps.bam_handle;
		sps_connect_info->destination = SPS_DEV_HANDLE_MEM;
		/* Producer pipe will handle this connection */
		sps_connect_info->mode = SPS_MODE_SRC;
		sps_connect_info->options =
			SPS_O_AUTO_ENABLE | SPS_O_EOT;
	} else {
		/*
		 * For a CE consumer transfer, the source should be
		 * system memory, whereas the destination should be
		 * the CE peripheral.
		 */
		sps_connect_info->source = SPS_DEV_HANDLE_MEM;
		sps_connect_info->destination = pce_dev->ce_sps.bam_handle;
		sps_connect_info->mode = SPS_MODE_DEST;
		sps_connect_info->options =
			SPS_O_AUTO_ENABLE | SPS_O_EOT;
	}

	/* Producer pipe index */
	sps_connect_info->src_pipe_index = pce_dev->ce_sps.src_pipe_index;
	/* Consumer pipe index */
	sps_connect_info->dest_pipe_index = pce_dev->ce_sps.dest_pipe_index;
	sps_connect_info->event_thresh = 0x10;
	/*
	 * Max. no of scatter/gather buffers that can
	 * be passed by block layer = 32 (NR_SG).
	 * Each BAM descriptor needs 64 bits (8 bytes).
	 * One BAM descriptor is required per buffer transfer.
	 * So we would require a total of 256 (32 * 8) bytes of descriptor FIFO.
	 * But due to a HW limitation we need to allocate at least one extra
	 * descriptor's worth of memory (256 bytes + 8 bytes). To keep the size
	 * a power of 2, we allocate 512 bytes of memory.
	 */
	sps_connect_info->desc.size = 512;
	sps_connect_info->desc.base = dma_alloc_coherent(pce_dev->pdev,
					sps_connect_info->desc.size,
					&sps_connect_info->desc.phys_base,
					GFP_KERNEL);
	if (sps_connect_info->desc.base == NULL) {
		rc = -ENOMEM;
		pr_err("Can not allocate coherent memory for sps data\n");
		goto get_config_err;
	}

	memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);

	/* Establish connection between peripheral and memory endpoint */
	rc = sps_connect(sps_pipe_info, sps_connect_info);
	if (rc) {
		pr_err("sps_connect() fail pipe_handle=0x%x, rc = %d\n",
			(u32)sps_pipe_info, rc);
		goto sps_connect_err;
	}

	sps_event->mode = SPS_TRIGGER_CALLBACK;
	sps_event->options = SPS_O_EOT;
	sps_event->xfer_done = NULL;
	sps_event->user = (void *)pce_dev;

	pr_debug("success, %s : pipe_handle=0x%x, desc fifo base (phy) = 0x%x\n",
		is_producer ? "PRODUCER(RX/OUT)" : "CONSUMER(TX/IN)",
		(u32)sps_pipe_info, sps_connect_info->desc.phys_base);
	goto out;

sps_connect_err:
	dma_free_coherent(pce_dev->pdev,
			sps_connect_info->desc.size,
			sps_connect_info->desc.base,
			sps_connect_info->desc.phys_base);
get_config_err:
	sps_free_endpoint(sps_pipe_info);
out:
	return rc;
}

/**
 * Disconnect and deallocate a CE peripheral's SPS endpoint
 *
 * This function disconnects the endpoint and deallocates
 * the endpoint context.
 *
 * This function should only be called once, typically
 * during driver remove.
 *
 * @pce_dev - Pointer to qce_device structure
 * @ep - Pointer to sps endpoint data structure
 *
 */
static void qce_sps_exit_ep_conn(struct qce_device *pce_dev,
				struct qce_sps_ep_conn_data *ep)
{
	struct sps_pipe *sps_pipe_info = ep->pipe;
	struct sps_connect *sps_connect_info = &ep->connect;

	sps_disconnect(sps_pipe_info);
	dma_free_coherent(pce_dev->pdev,
			sps_connect_info->desc.size,
			sps_connect_info->desc.base,
			sps_connect_info->desc.phys_base);
	sps_free_endpoint(sps_pipe_info);
}

/**
 * Initialize SPS HW connected with CE core
 *
 * This function registers BAM HW resources with
 * the SPS driver and then initializes two SPS endpoints.
 *
 * This function should only be called once, typically
 * during driver probe.
 *
 * @pce_dev - Pointer to qce_device structure
 *
 * @return - 0 if successful else negative value.
 *
 */
static int qce_sps_init(struct qce_device *pce_dev)
{
	int rc = 0;
	struct sps_bam_props bam = {0};
	bool register_bam = false;

	bam.phys_addr = pce_dev->ce_sps.bam_mem;
	bam.virt_addr = pce_dev->ce_sps.bam_iobase;

	/*
	 * This event threshold value is only significant for BAM-to-BAM
	 * transfer. It's ignored for BAM-to-System mode transfer.
	 */
	bam.event_threshold = 0x10;	/* Pipe event threshold */
	/*
	 * This threshold controls when the BAM publishes
	 * the descriptor size on the sideband interface.
	 * SPS HW will only be used when
	 * data transfer size > 64 bytes.
	 */
	bam.summing_threshold = 64;
	/* SPS driver will handle the crypto BAM IRQ */
	bam.irq = (u32)pce_dev->ce_sps.bam_irq;
	bam.manage = SPS_BAM_MGR_LOCAL;

	pr_debug("bam physical base=0x%x\n", (u32)bam.phys_addr);
	pr_debug("bam virtual base=0x%x\n", (u32)bam.virt_addr);

	mutex_lock(&bam_register_cnt);
	if ((bam_registry.handle == 0) && (bam_registry.cnt == 0)) {
		/* Register CE Peripheral BAM device to SPS driver */
		rc = sps_register_bam_device(&bam, &bam_registry.handle);
		if (rc) {
			mutex_unlock(&bam_register_cnt);
			pr_err("sps_register_bam_device() failed! err=%d", rc);
			return -EIO;
		}
		bam_registry.cnt++;
		register_bam = true;
	} else {
		bam_registry.cnt++;
	}
	mutex_unlock(&bam_register_cnt);
	pce_dev->ce_sps.bam_handle = bam_registry.handle;
	pr_debug("BAM device registered. bam_handle=0x%x",
		pce_dev->ce_sps.bam_handle);

	rc = qce_sps_init_ep_conn(pce_dev, &pce_dev->ce_sps.producer, true);
	if (rc)
		goto sps_connect_producer_err;
	rc = qce_sps_init_ep_conn(pce_dev, &pce_dev->ce_sps.consumer, false);
	if (rc)
		goto sps_connect_consumer_err;

	pce_dev->ce_sps.out_transfer.user = pce_dev->ce_sps.producer.pipe;
	pce_dev->ce_sps.in_transfer.user = pce_dev->ce_sps.consumer.pipe;
	pr_info("Qualcomm MSM CE-BAM at 0x%016llx irq %d\n",
		(unsigned long long)pce_dev->ce_sps.bam_mem,
		(unsigned int)pce_dev->ce_sps.bam_irq);
	return rc;

sps_connect_consumer_err:
	qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_sps.producer);
sps_connect_producer_err:
	if (register_bam)
		sps_deregister_bam_device(pce_dev->ce_sps.bam_handle);

	return rc;
}

/**
 * De-initialize SPS HW connected with CE core
 *
 * This function deinitializes the SPS endpoints and then
 * deregisters the BAM resources from the SPS driver.
 *
 * This function should only be called once, typically
 * during driver remove.
 *
 * @pce_dev - Pointer to qce_device structure
 *
 */
static void qce_sps_exit(struct qce_device *pce_dev)
{
	qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_sps.consumer);
	qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_sps.producer);
	mutex_lock(&bam_register_cnt);
	if ((bam_registry.handle != 0) && (bam_registry.cnt == 1)) {
		sps_deregister_bam_device(pce_dev->ce_sps.bam_handle);
		bam_registry.cnt = 0;
		bam_registry.handle = 0;
	}
	if ((bam_registry.handle != 0) && (bam_registry.cnt > 1))
		bam_registry.cnt--;
	mutex_unlock(&bam_register_cnt);

	iounmap(pce_dev->ce_sps.bam_iobase);
}

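/*
 * SPS pipe callbacks. Each request queues descriptors on both the consumer
 * and producer pipes, and the request is complete only after both pipes
 * have signalled, so whichever callback fires second resets the pipe
 * states and invokes the completion handler.
 */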
static void _aead_sps_producer_callback(struct sps_event_notify *notify)
{
	struct qce_device *pce_dev = (struct qce_device *)notify->user;

	pce_dev->ce_sps.notify = *notify;
	pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
			notify->event_id,
			notify->data.transfer.iovec.addr,
			notify->data.transfer.iovec.size,
			notify->data.transfer.iovec.flags);

	pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP;
	if (pce_dev->ce_sps.consumer_state == QCE_PIPE_STATE_COMP) {
		pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_IDLE;
		pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;

		/* done */
		_aead_complete(pce_dev);
	}
}

static void _aead_sps_consumer_callback(struct sps_event_notify *notify)
{
	struct qce_device *pce_dev = (struct qce_device *)notify->user;

	pce_dev->ce_sps.notify = *notify;
	pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
			notify->event_id,
			notify->data.transfer.iovec.addr,
			notify->data.transfer.iovec.size,
			notify->data.transfer.iovec.flags);

	pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_COMP;
	if (pce_dev->ce_sps.producer_state == QCE_PIPE_STATE_COMP) {
		pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_IDLE;
		pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;

		/* done */
		_aead_complete(pce_dev);
	}
}

static void _sha_sps_producer_callback(struct sps_event_notify *notify)
{
	struct qce_device *pce_dev = (struct qce_device *)notify->user;

	pce_dev->ce_sps.notify = *notify;
	pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
			notify->event_id,
			notify->data.transfer.iovec.addr,
			notify->data.transfer.iovec.size,
			notify->data.transfer.iovec.flags);

	pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP;
	if (pce_dev->ce_sps.consumer_state == QCE_PIPE_STATE_COMP) {
		pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_IDLE;
		pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;

		/* done */
		_sha_complete(pce_dev);
	}
}

static void _sha_sps_consumer_callback(struct sps_event_notify *notify)
{
	struct qce_device *pce_dev = (struct qce_device *)notify->user;

	pce_dev->ce_sps.notify = *notify;
	pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
			notify->event_id,
			notify->data.transfer.iovec.addr,
			notify->data.transfer.iovec.size,
			notify->data.transfer.iovec.flags);

	pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_COMP;
	if (pce_dev->ce_sps.producer_state == QCE_PIPE_STATE_COMP) {
		pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_IDLE;
		pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;

		/* done */
		_sha_complete(pce_dev);
	}
}

static void _ablk_cipher_sps_producer_callback(struct sps_event_notify *notify)
{
	struct qce_device *pce_dev = (struct qce_device *)notify->user;

	pce_dev->ce_sps.notify = *notify;
	pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
			notify->event_id,
			notify->data.transfer.iovec.addr,
			notify->data.transfer.iovec.size,
			notify->data.transfer.iovec.flags);

	pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP;
	if (pce_dev->ce_sps.consumer_state == QCE_PIPE_STATE_COMP) {
		pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_IDLE;
		pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;

		/* done */
		_ablk_cipher_complete(pce_dev);
	}
}

static void _ablk_cipher_sps_consumer_callback(struct sps_event_notify *notify)
{
	struct qce_device *pce_dev = (struct qce_device *)notify->user;

	pce_dev->ce_sps.notify = *notify;
	pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
			notify->event_id,
			notify->data.transfer.iovec.addr,
			notify->data.transfer.iovec.size,
			notify->data.transfer.iovec.flags);

	pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_COMP;
	if (pce_dev->ce_sps.producer_state == QCE_PIPE_STATE_COMP) {
		pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_IDLE;
		pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;

		/* done */
		_ablk_cipher_complete(pce_dev);
	}
}

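/*
 * Append one SPS command element that writes 'data' to the CE register at
 * offset 'addr', advancing the element cursor. If 'populate' is non-NULL,
 * the element's location is recorded there so the command list can be
 * patched per request later.
 */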
static void qce_add_cmd_element(struct qce_device *pdev,
		struct sps_command_element **cmd_ptr, u32 addr,
		u32 data, struct sps_command_element **populate)
{
	(*cmd_ptr)->addr = (uint32_t)(addr + pdev->phy_iobase);
	(*cmd_ptr)->data = data;
	(*cmd_ptr)->mask = 0xFFFFFFFF;
	if (populate != NULL)
		*populate = *cmd_ptr;
	(*cmd_ptr)++;
}

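/*
 * Build the reusable AES cipher command lists in the allocated coherent
 * memory, one per mode/key-size combination, and record the addresses of
 * the elements that must be patched per request.
 */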
static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev,
		unsigned char **pvaddr, enum qce_cipher_mode_enum mode,
		bool key_128)
{
	struct sps_command_element *ce_vaddr =
		(struct sps_command_element *)(*pvaddr);
	uint32_t ce_vaddr_start = (uint32_t)(*pvaddr);
	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
	struct qce_cmdlist_info *pcl_info = NULL;
	int i = 0;
	uint32_t encr_cfg = 0;
	uint32_t key_reg = 0;
	uint32_t xts_key_reg = 0;
	uint32_t iv_reg = 0;
	uint32_t crypto_cfg = 0;
	uint32_t beats = (pdev->ce_sps.ce_burst_size >> 3) - 1;
	uint32_t pipe_pair = pdev->ce_sps.pipe_pair_index;

	crypto_cfg = (beats << CRYPTO_REQ_SIZE) |
			BIT(CRYPTO_MASK_DOUT_INTR) |
			BIT(CRYPTO_MASK_DIN_INTR) |
			BIT(CRYPTO_MASK_OP_DONE_INTR) |
			(0 << CRYPTO_HIGH_SPD_EN_N) |
			(pipe_pair << CRYPTO_PIPE_SET_SELECT);
	/*
	 * Designate chunks of the allocated memory to various
	 * command list pointers related to AES cipher operations defined
	 * in ce_cmdlistptrs_ops structure.
	 */
	switch (mode) {
	case QCE_MODE_CBC:
	case QCE_MODE_CTR:
		if (key_128 == true) {
			cmdlistptr->cipher_aes_128_cbc_ctr.cmdlist =
						(uint32_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_aes_128_cbc_ctr);

			encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES128 <<
						CRYPTO_ENCR_KEY_SZ) |
					(CRYPTO_ENCR_ALG_AES <<
						CRYPTO_ENCR_ALG);
			iv_reg = 4;
			key_reg = 4;
			xts_key_reg = 0;
		} else {
			cmdlistptr->cipher_aes_256_cbc_ctr.cmdlist =
						(uint32_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_aes_256_cbc_ctr);

			encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES256 <<
						CRYPTO_ENCR_KEY_SZ) |
					(CRYPTO_ENCR_ALG_AES <<
						CRYPTO_ENCR_ALG);
			iv_reg = 4;
			key_reg = 8;
			xts_key_reg = 0;
		}
		break;
	case QCE_MODE_ECB:
		if (key_128 == true) {
			cmdlistptr->cipher_aes_128_ecb.cmdlist =
						(uint32_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_aes_128_ecb);

			encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES128 <<
						CRYPTO_ENCR_KEY_SZ) |
					(CRYPTO_ENCR_ALG_AES <<
						CRYPTO_ENCR_ALG) |
					(CRYPTO_ENCR_MODE_ECB <<
						CRYPTO_ENCR_MODE);
			iv_reg = 0;
			key_reg = 4;
			xts_key_reg = 0;
		} else {
			cmdlistptr->cipher_aes_256_ecb.cmdlist =
						(uint32_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_aes_256_ecb);

			encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES256 <<
						CRYPTO_ENCR_KEY_SZ) |
					(CRYPTO_ENCR_ALG_AES <<
						CRYPTO_ENCR_ALG) |
					(CRYPTO_ENCR_MODE_ECB <<
						CRYPTO_ENCR_MODE);
			iv_reg = 0;
			key_reg = 8;
			xts_key_reg = 0;
		}
		break;
	case QCE_MODE_XTS:
		if (key_128 == true) {
			cmdlistptr->cipher_aes_128_xts.cmdlist =
						(uint32_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_aes_128_xts);

			encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES128 <<
						CRYPTO_ENCR_KEY_SZ) |
					(CRYPTO_ENCR_ALG_AES <<
						CRYPTO_ENCR_ALG) |
					(CRYPTO_ENCR_MODE_XTS <<
						CRYPTO_ENCR_MODE);
			iv_reg = 4;
			key_reg = 4;
			xts_key_reg = 4;
		} else {
			cmdlistptr->cipher_aes_256_xts.cmdlist =
						(uint32_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_aes_256_xts);

			encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES256 <<
						CRYPTO_ENCR_KEY_SZ) |
					(CRYPTO_ENCR_ALG_AES <<
						CRYPTO_ENCR_ALG) |
					(CRYPTO_ENCR_MODE_XTS <<
						CRYPTO_ENCR_MODE);
			iv_reg = 4;
			key_reg = 8;
			xts_key_reg = 8;
		}
		break;
	default:
		break;
	}

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, crypto_cfg,
			&pcl_info->crypto_cfg);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
			&pcl_info->seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
			&pcl_info->encr_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
			&pcl_info->encr_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
			&pcl_info->encr_seg_start);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
			(uint32_t)0xffffffff, &pcl_info->encr_mask);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
			&pcl_info->auth_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
			&pcl_info->encr_key);
	for (i = 1; i < key_reg; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
			(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
			0, NULL);
	if (xts_key_reg) {
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_XTS_KEY0_REG,
			0, &pcl_info->encr_xts_key);
		for (i = 1; i < xts_key_reg; i++)
			qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_ENCR_XTS_KEY0_REG +
						i * sizeof(uint32_t)),
				0, NULL);
		qce_add_cmd_element(pdev, &ce_vaddr,
				CRYPTO_ENCR_XTS_DU_SIZE_REG, 0, NULL);
	}
	if (iv_reg) {
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
				&pcl_info->encr_cntr_iv);
		for (i = 1; i < iv_reg; i++)
			qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
				0, NULL);
	}
	/* Add dummy writes to align size to burst-size multiple */
	if (mode == QCE_MODE_XTS) {
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
				0, &pcl_info->auth_seg_size);
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
				0, &pcl_info->auth_seg_size);
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
				0, &pcl_info->auth_seg_size);
	}

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
			&pcl_info->go_proc);

	pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
	*pvaddr = (unsigned char *) ce_vaddr;

	return 0;
}

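/*
 * Build the reusable DES/3DES cipher command lists (ECB and CBC variants)
 * and record the addresses of the per-request elements.
 */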
static int _setup_cipher_des_cmdlistptrs(struct qce_device *pdev,
		unsigned char **pvaddr, enum qce_cipher_alg_enum alg,
		bool mode_cbc)
{
	struct sps_command_element *ce_vaddr =
		(struct sps_command_element *)(*pvaddr);
	uint32_t ce_vaddr_start = (uint32_t)(*pvaddr);
	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
	struct qce_cmdlist_info *pcl_info = NULL;
	int i = 0;
	uint32_t encr_cfg = 0;
	uint32_t key_reg = 0;
	uint32_t iv_reg = 0;
	uint32_t crypto_cfg = 0;
	uint32_t beats = (pdev->ce_sps.ce_burst_size >> 3) - 1;
	uint32_t pipe_pair = pdev->ce_sps.pipe_pair_index;

	crypto_cfg = (beats << CRYPTO_REQ_SIZE) |
			BIT(CRYPTO_MASK_DOUT_INTR) |
			BIT(CRYPTO_MASK_DIN_INTR) |
			BIT(CRYPTO_MASK_OP_DONE_INTR) |
			(0 << CRYPTO_HIGH_SPD_EN_N) |
			(pipe_pair << CRYPTO_PIPE_SET_SELECT);

	/*
	 * Designate chunks of the allocated memory to various
	 * command list pointers related to cipher operations defined
	 * in ce_cmdlistptrs_ops structure.
	 */
	switch (alg) {
	case CIPHER_ALG_DES:
		if (mode_cbc) {
			cmdlistptr->cipher_des_cbc.cmdlist =
						(uint32_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_des_cbc);

			encr_cfg = (CRYPTO_ENCR_KEY_SZ_DES <<
						CRYPTO_ENCR_KEY_SZ) |
					(CRYPTO_ENCR_ALG_DES <<
						CRYPTO_ENCR_ALG) |
					(CRYPTO_ENCR_MODE_CBC <<
						CRYPTO_ENCR_MODE);
			iv_reg = 2;
			key_reg = 2;
		} else {
			cmdlistptr->cipher_des_ecb.cmdlist =
						(uint32_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_des_ecb);

			encr_cfg = (CRYPTO_ENCR_KEY_SZ_DES <<
						CRYPTO_ENCR_KEY_SZ) |
					(CRYPTO_ENCR_ALG_DES <<
						CRYPTO_ENCR_ALG) |
					(CRYPTO_ENCR_MODE_ECB <<
						CRYPTO_ENCR_MODE);
			iv_reg = 0;
			key_reg = 2;
		}
		break;
	case CIPHER_ALG_3DES:
		if (mode_cbc) {
			cmdlistptr->cipher_3des_cbc.cmdlist =
						(uint32_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_3des_cbc);

			encr_cfg = (CRYPTO_ENCR_KEY_SZ_3DES <<
						CRYPTO_ENCR_KEY_SZ) |
					(CRYPTO_ENCR_ALG_DES <<
						CRYPTO_ENCR_ALG) |
					(CRYPTO_ENCR_MODE_CBC <<
						CRYPTO_ENCR_MODE);
			iv_reg = 2;
			key_reg = 6;
		} else {
			cmdlistptr->cipher_3des_ecb.cmdlist =
						(uint32_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_3des_ecb);

			encr_cfg = (CRYPTO_ENCR_KEY_SZ_3DES <<
						CRYPTO_ENCR_KEY_SZ) |
					(CRYPTO_ENCR_ALG_DES <<
						CRYPTO_ENCR_ALG) |
					(CRYPTO_ENCR_MODE_ECB <<
						CRYPTO_ENCR_MODE);
			iv_reg = 0;
			key_reg = 6;
		}
		break;
	default:
		break;
	}

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, crypto_cfg,
			&pcl_info->crypto_cfg);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
			&pcl_info->seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
			&pcl_info->encr_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
			&pcl_info->encr_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
			&pcl_info->encr_seg_start);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
			&pcl_info->auth_seg_cfg);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
			&pcl_info->encr_key);
	for (i = 1; i < key_reg; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
			(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
			0, NULL);
	if (iv_reg) {
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
				&pcl_info->encr_cntr_iv);
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
				NULL);
		/* Add 2 dummy writes to align size to burst-size multiple */
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR2_IV2_REG, 0,
				NULL);
	}
	/* Add dummy writes to align size to burst-size multiple */
	if (!mode_cbc) {
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
				0, &pcl_info->auth_seg_size);
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
				0, &pcl_info->auth_seg_size);
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
				0, &pcl_info->auth_seg_size);
	}

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
			&pcl_info->go_proc);

	pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
	*pvaddr = (unsigned char *) ce_vaddr;

	return 0;
}

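/*
 * Build the reusable hash/HMAC/CMAC command lists and record the addresses
 * of the elements that are patched per request (IVs, keys, byte counts,
 * segment sizes).
 */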
1641static int _setup_auth_cmdlistptrs(struct qce_device *pdev,
1642 unsigned char **pvaddr, enum qce_hash_alg_enum alg,
1643 bool key_128)
1644{
1645 struct sps_command_element *ce_vaddr =
1646 (struct sps_command_element *)(*pvaddr);
1647 uint32_t ce_vaddr_start = (uint32_t)(*pvaddr);
1648 struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
1649 struct qce_cmdlist_info *pcl_info = NULL;
1650 int i = 0;
1651 uint32_t key_reg = 0;
1652 uint32_t auth_cfg = 0;
1653 uint32_t iv_reg = 0;
1654 uint32_t crypto_cfg = 0;
1655 uint32_t beats = (pdev->ce_sps.ce_burst_size >> 3) - 1;
1656 uint32_t pipe_pair = pdev->ce_sps.pipe_pair_index;
1657
1658 crypto_cfg = (beats << CRYPTO_REQ_SIZE) |
1659 BIT(CRYPTO_MASK_DOUT_INTR) |
1660 BIT(CRYPTO_MASK_DIN_INTR) |
1661 BIT(CRYPTO_MASK_OP_DONE_INTR) |
1662 (0 << CRYPTO_HIGH_SPD_EN_N) |
1663 (pipe_pair << CRYPTO_PIPE_SET_SELECT);
1664 /*
1665 * Designate chunks of the allocated memory to various
1666 * command list pointers related to authentication operations
1667 * defined in ce_cmdlistptrs_ops structure.
1668 */
1669 switch (alg) {
1670 case QCE_HASH_SHA1:
1671 cmdlistptr->auth_sha1.cmdlist = (uint32_t)ce_vaddr;
1672 pcl_info = &(cmdlistptr->auth_sha1);
1673
1674 auth_cfg = (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
1675 (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
1676 (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
1677 (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
1678 iv_reg = 5;
1679 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
1680 crypto_cfg, &pcl_info->crypto_cfg);
1681 /* 1 dummy write */
1682 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
1683 0, NULL);
1684 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
1685 0, NULL);
1686
1687 break;
1688 case QCE_HASH_SHA256:
1689 cmdlistptr->auth_sha256.cmdlist = (uint32_t)ce_vaddr;
1690 pcl_info = &(cmdlistptr->auth_sha256);
1691
1692 auth_cfg = (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
1693 (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
1694 (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
1695 (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
1696 iv_reg = 8;
1697 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
1698 crypto_cfg, &pcl_info->crypto_cfg);
1699 /* 2 dummy writes */
1700 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
1701 0, NULL);
1702 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
1703 0, NULL);
1704 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
1705 0, NULL);
1706 break;
1707 case QCE_HASH_SHA1_HMAC:
1708 cmdlistptr->auth_sha1_hmac.cmdlist = (uint32_t)ce_vaddr;
1709 pcl_info = &(cmdlistptr->auth_sha1_hmac);
1710
1711 auth_cfg = (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
1712 (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
1713 (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
1714 (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
1715 key_reg = 16;
1716 iv_reg = 5;
1717 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
1718 crypto_cfg, &pcl_info->crypto_cfg);
1719 /* 1 dummy write */
1720 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
1721 0, NULL);
1722 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
1723 0, NULL);
1724 break;
1725 case QCE_AEAD_SHA1_HMAC:
1726 cmdlistptr->aead_sha1_hmac.cmdlist = (uint32_t)ce_vaddr;
1727 pcl_info = &(cmdlistptr->aead_sha1_hmac);
1728
1729 auth_cfg = (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
1730 (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
1731 (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
1732 (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS) |
1733 (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST);
1734
1735 key_reg = 16;
1736 iv_reg = 5;
1737 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
1738 crypto_cfg, &pcl_info->crypto_cfg);
1739 /* 2 dummy writes */
1740 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
1741 0, NULL);
1742 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
1743 0, NULL);
1744 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
1745 0, NULL);
1746 break;
1747 case QCE_HASH_SHA256_HMAC:
1748 cmdlistptr->auth_sha256_hmac.cmdlist = (uint32_t)ce_vaddr;
1749 pcl_info = &(cmdlistptr->auth_sha256_hmac);
1750
1751 auth_cfg = (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
1752 (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
1753 (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
1754 (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
1755 key_reg = 16;
1756 iv_reg = 8;
1757
1758 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
1759 crypto_cfg, &pcl_info->crypto_cfg);
1760 /* 2 dummy writes */
1761 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
1762 0, NULL);
1763 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
1764 0, NULL);
1765 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
1766 0, NULL);
1767 break;
1768 case QCE_HASH_AES_CMAC:
1769 if (key_128 == true) {
1770 cmdlistptr->auth_aes_128_cmac.cmdlist =
1771 (uint32_t)ce_vaddr;
1772 pcl_info = &(cmdlistptr->auth_aes_128_cmac);
1773
1774 auth_cfg = (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
1775 (CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
1776 (CRYPTO_AUTH_SIZE_ENUM_16_BYTES <<
1777 CRYPTO_AUTH_SIZE) |
1778 (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
1779 (CRYPTO_AUTH_KEY_SZ_AES128 <<
1780 CRYPTO_AUTH_KEY_SIZE) |
1781 (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
1782 key_reg = 4;
1783 } else {
1784 cmdlistptr->auth_aes_256_cmac.cmdlist =
1785 (uint32_t)ce_vaddr;
1786 pcl_info = &(cmdlistptr->auth_aes_256_cmac);
1787
1788 auth_cfg = (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST)|
1789 (CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
1790 (CRYPTO_AUTH_SIZE_ENUM_16_BYTES <<
1791 CRYPTO_AUTH_SIZE) |
1792 (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
1793 (CRYPTO_AUTH_KEY_SZ_AES256 <<
1794 CRYPTO_AUTH_KEY_SIZE) |
1795 (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
1796 key_reg = 8;
1797 }
1798 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
1799 crypto_cfg, &pcl_info->crypto_cfg);
1800 /* 2 dummy writes */
1801 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
1802 0, NULL);
1803 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
1804 0, NULL);
1805 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
1806 0, NULL);
1807 break;
1808 default:
1809 break;
1810 }
1811
1812 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
1813 &pcl_info->seg_size);
1814 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
1815 &pcl_info->encr_seg_cfg);
1816 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
1817 auth_cfg, &pcl_info->auth_seg_cfg);
1818 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
1819 &pcl_info->auth_seg_size);
1820 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
1821 &pcl_info->auth_seg_start);
1822
1823 if (alg == QCE_HASH_AES_CMAC) {
1824 /* reset auth iv, bytecount and key registers */
1825 for (i = 0; i < 16; i++)
1826 qce_add_cmd_element(pdev, &ce_vaddr,
1827 (CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
1828 0, NULL);
1829 for (i = 0; i < 16; i++)
1830 qce_add_cmd_element(pdev, &ce_vaddr,
1831 (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
1832 0, NULL);
1833 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
1834 0, NULL);
1835 } else {
1836 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
1837 &pcl_info->auth_iv);
1838 for (i = 1; i < iv_reg; i++)
1839 qce_add_cmd_element(pdev, &ce_vaddr,
1840 (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
1841 0, NULL);
1842 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
1843 0, &pcl_info->auth_bytecount);
1844 }
1845 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
1846
1847 if (key_reg) {
1848 qce_add_cmd_element(pdev, &ce_vaddr,
1849 CRYPTO_AUTH_KEY0_REG, 0, &pcl_info->auth_key);
1850 for (i = 1; i < key_reg; i++)
1851 qce_add_cmd_element(pdev, &ce_vaddr,
1852 (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
1853 0, NULL);
1854 }
1855 if (alg != QCE_AEAD_SHA1_HMAC)
1856 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
1857 ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
1858 &pcl_info->go_proc);
1859
1860 pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
1861 *pvaddr = (unsigned char *) ce_vaddr;
1862
1863 return 0;
1864}
1865
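/*
 * A sketch of how the element pointers captured above are meant to be
 * consumed at request time (illustrative only; "info" stands for any of
 * the qce_cmdlist_info instances populated in this function):
 *
 *	struct sps_command_element *pce;
 *
 *	pce = info->auth_seg_size;	// saved during list construction
 *	pce->data = size;		// patch the value for this request
 *
 * Only the data word of a saved element changes between requests; the
 * target register address and the rest of the list stay fixed, which is
 * what lets the same pre-built command list be reissued cheaply.
 */
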
1866static int _setup_aead_cmdlistptrs(struct qce_device *pdev,
1867 unsigned char **pvaddr, bool key_128)
1868{
1869 struct sps_command_element *ce_vaddr =
1870 (struct sps_command_element *)(*pvaddr);
1871 uint32_t ce_vaddr_start = (uint32_t)(*pvaddr);
1872 struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
1873 struct qce_cmdlist_info *pcl_info = NULL;
1874 int i = 0;
1875 uint32_t encr_cfg = 0;
1876 uint32_t auth_cfg = 0;
1877 uint32_t key_reg = 0;
1878 uint32_t crypto_cfg = 0;
1879 uint32_t beats = (pdev->ce_sps.ce_burst_size >> 3) - 1;
1880 uint32_t pipe_pair = pdev->ce_sps.pipe_pair_index;
1881
1882 crypto_cfg = (beats << CRYPTO_REQ_SIZE) |
1883 BIT(CRYPTO_MASK_DOUT_INTR) |
1884 BIT(CRYPTO_MASK_DIN_INTR) |
1885 BIT(CRYPTO_MASK_OP_DONE_INTR) |
1886 (0 << CRYPTO_HIGH_SPD_EN_N) |
1887 (pipe_pair << CRYPTO_PIPE_SET_SELECT);
	/*
	 * Designate chunks of the allocated memory to the various
	 * command list pointers for AEAD operations defined in the
	 * qce_cmdlistptr_ops structure.
	 */
1893 if (key_128 == true) {
1894 cmdlistptr->aead_aes_128_ccm.cmdlist = (uint32_t)ce_vaddr;
1895 pcl_info = &(cmdlistptr->aead_aes_128_ccm);
1896
		auth_cfg = (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
			(CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
			(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
			(CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE) |
			((MAX_NONCE/sizeof(uint32_t)) <<
				CRYPTO_AUTH_NONCE_NUM_WORDS);
1901 auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
1902 encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
1903 (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
1904 ((CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE));
1905 key_reg = 4;
	} else {
1908 cmdlistptr->aead_aes_256_ccm.cmdlist = (uint32_t)ce_vaddr;
1909 pcl_info = &(cmdlistptr->aead_aes_256_ccm);
1910
1911 auth_cfg = (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
1912 (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
1913 (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
1914 (CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE) |
1915 ((MAX_NONCE/sizeof(uint32_t)) <<
1916 CRYPTO_AUTH_NONCE_NUM_WORDS);
1917 auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
1918 encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
1919 (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
1920 ((CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE));
1921 key_reg = 8;
1922 }
1923 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
1924 crypto_cfg, &pcl_info->crypto_cfg);
1925
1926 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0, NULL);
1927 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0, NULL);
1928 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
1929 NULL);
	/* 1 dummy write */
1931 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0, NULL);
1932 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
1933 &pcl_info->seg_size);
1934 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG,
1935 encr_cfg, &pcl_info->encr_seg_cfg);
1936 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
1937 &pcl_info->encr_seg_size);
1938 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
1939 &pcl_info->encr_seg_start);
1940 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
1941 (uint32_t)0xffffffff, &pcl_info->encr_mask);
1942 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
1943 auth_cfg, &pcl_info->auth_seg_cfg);
1944 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
1945 &pcl_info->auth_seg_size);
1946 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
1947 &pcl_info->auth_seg_start);
1948 /* reset auth iv, bytecount and key registers */
1949 for (i = 0; i < 8; i++)
1950 qce_add_cmd_element(pdev, &ce_vaddr,
1951 (CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
1952 0, NULL);
1953 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
1954 0, NULL);
1955 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG,
1956 0, NULL);
1957 for (i = 0; i < 16; i++)
1958 qce_add_cmd_element(pdev, &ce_vaddr,
1959 (CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
1960 0, NULL);
1961 /* set auth key */
1962 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0,
1963 &pcl_info->auth_key);
1964 for (i = 1; i < key_reg; i++)
1965 qce_add_cmd_element(pdev, &ce_vaddr,
1966 (CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
1967 0, NULL);
1968 /* set NONCE info */
1969 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_INFO_NONCE0_REG, 0,
1970 &pcl_info->auth_nonce_info);
1971 for (i = 1; i < 4; i++)
1972 qce_add_cmd_element(pdev, &ce_vaddr,
1973 (CRYPTO_AUTH_INFO_NONCE0_REG +
1974 i * sizeof(uint32_t)), 0, NULL);
1975
1976 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
1977 &pcl_info->encr_key);
1978 for (i = 1; i < key_reg; i++)
1979 qce_add_cmd_element(pdev, &ce_vaddr,
1980 (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
1981 0, NULL);
1982 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
1983 &pcl_info->encr_cntr_iv);
1984 for (i = 1; i < 4; i++)
1985 qce_add_cmd_element(pdev, &ce_vaddr,
1986 (CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
1987 0, NULL);
1988 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_CCM_INT_CNTR0_REG, 0,
1989 &pcl_info->encr_ccm_cntr_iv);
1990 for (i = 1; i < 4; i++)
1991 qce_add_cmd_element(pdev, &ce_vaddr,
1992 (CRYPTO_ENCR_CCM_INT_CNTR0_REG + i * sizeof(uint32_t)),
1993 0, NULL);
1994
1995 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
1996 ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
1997 &pcl_info->go_proc);
1998
1999 pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
2000 *pvaddr = (unsigned char *) ce_vaddr;
2001
2002 return 0;
2003}
2004
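/*
 * Note on the CCM nonce programming above: assuming MAX_NONCE is 16
 * bytes (as defined in qce.h), MAX_NONCE/sizeof(uint32_t) evaluates to
 * 4 nonce words, which matches the four CRYPTO_AUTH_INFO_NONCE0_REG
 * slots reserved in this command list.
 */
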
2005static int _setup_unlock_pipe_cmdlistptrs(struct qce_device *pdev,
2006 unsigned char **pvaddr)
2007{
2008 struct sps_command_element *ce_vaddr =
2009 (struct sps_command_element *)(*pvaddr);
2010 uint32_t ce_vaddr_start = (uint32_t)(*pvaddr);
2011 struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
2012 struct qce_cmdlist_info *pcl_info = NULL;
2013
2014 cmdlistptr->unlock_all_pipes.cmdlist = (uint32_t)ce_vaddr;
2015 pcl_info = &(cmdlistptr->unlock_all_pipes);
2016
	/*
	 * Designate a chunk of the allocated memory to the command list
	 * used to unlock the pipes.
	 */
2021 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
2022 CRYPTO_CONFIG_RESET, NULL);
2023 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
2024 CRYPTO_CONFIG_RESET, NULL);
2025 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
2026 CRYPTO_CONFIG_RESET, NULL);
2027 qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
2028 CRYPTO_CONFIG_RESET, NULL);
2029 pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
2030 *pvaddr = (unsigned char *) ce_vaddr;
2031
2032 return 0;
2033}
2034
2035static int qce_setup_cmdlistptrs(struct qce_device *pdev,
2036 unsigned char **pvaddr)
2037{
2038 struct sps_command_element *ce_vaddr =
2039 (struct sps_command_element *)(*pvaddr);
	/*
	 * Designate chunks of the allocated memory to the various
	 * command list pointers for the operations defined in the
	 * qce_cmdlistptr_ops structure.
	 */
2045 ce_vaddr =
2046 (struct sps_command_element *) ALIGN(((unsigned int) ce_vaddr),
2047 16);
2048 *pvaddr = (unsigned char *) ce_vaddr;
2049
2050 _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_CBC, true);
2051 _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_CTR, true);
2052 _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_ECB, true);
2053 _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_XTS, true);
2054 _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_CBC, false);
2055 _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_CTR, false);
2056 _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_ECB, false);
2057 _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_XTS, false);
2058
2059 _setup_cipher_des_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_DES, true);
2060 _setup_cipher_des_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_DES, false);
2061 _setup_cipher_des_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_3DES, true);
2062 _setup_cipher_des_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_3DES, false);
2063
2064 _setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_SHA1, false);
2065 _setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_SHA256, false);
2066
2067 _setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_SHA1_HMAC, false);
2068 _setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_SHA256_HMAC, false);
2069
2070 _setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_AES_CMAC, true);
2071 _setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_AES_CMAC, false);
2072
2073 _setup_auth_cmdlistptrs(pdev, pvaddr, QCE_AEAD_SHA1_HMAC, false);
2074
2075 _setup_aead_cmdlistptrs(pdev, pvaddr, true);
2076 _setup_aead_cmdlistptrs(pdev, pvaddr, false);
2077 _setup_unlock_pipe_cmdlistptrs(pdev, pvaddr);
2078
2079 return 0;
2080}
2081
2082static int qce_setup_ce_sps_data(struct qce_device *pce_dev)
2083{
2084 unsigned char *vaddr;
2085
2086 vaddr = pce_dev->coh_vmem;
2087 vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
2088
	/* Allow for 256 descriptor entries (command and data) per pipe */
	pce_dev->ce_sps.in_transfer.iovec = (struct sps_iovec *)vaddr;
	pce_dev->ce_sps.in_transfer.iovec_phys =
					(uint32_t)GET_PHYS_ADDR(vaddr);
	vaddr += MAX_BAM_DESCRIPTORS * 8;	/* 8 bytes per sps_iovec */
2094
2095 pce_dev->ce_sps.out_transfer.iovec = (struct sps_iovec *)vaddr;
2096 pce_dev->ce_sps.out_transfer.iovec_phys =
2097 (uint32_t)GET_PHYS_ADDR(vaddr);
2098 vaddr += MAX_BAM_DESCRIPTORS * 8;
2099
2100 qce_setup_cmdlistptrs(pce_dev, &vaddr);
2101 pce_dev->ce_sps.result_dump = (uint32_t)vaddr;
2102 pce_dev->ce_sps.result = (struct ce_result_dump_format *)vaddr;
	vaddr += 128;	/* room for the result dump region */
2104
2105 return 0;
2106}
2107
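/*
 * Resulting layout of the coherent buffer carved up above (the 8 bytes
 * per descriptor entry is an assumption about sizeof(struct sps_iovec)):
 *
 *	coh_vmem, 16-byte aligned
 *	+ in_transfer iovec ring	MAX_BAM_DESCRIPTORS * 8 bytes
 *	+ out_transfer iovec ring	MAX_BAM_DESCRIPTORS * 8 bytes
 *	+ command lists			filled in by qce_setup_cmdlistptrs()
 *	+ result dump region		128 bytes
 */
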
2108int qce_aead_sha1_hmac_setup(struct qce_req *creq, struct crypto_aead *aead,
2109 struct qce_cmdlist_info *cmdlistinfo)
2110{
2111 uint32_t authk_size_in_word = creq->authklen/sizeof(uint32_t);
2112 uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
2113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2114 struct sps_command_element *pce = NULL;
2115 struct aead_request *areq = (struct aead_request *)creq->areq;
2116 int i;
2117
2118 _byte_stream_to_net_words(mackey32, creq->authkey,
2119 creq->authklen);
2120 pce = cmdlistinfo->auth_key;
2121 for (i = 0; i < authk_size_in_word; i++, pce++)
2122 pce->data = mackey32[i];
2123 pce = cmdlistinfo->auth_iv;
2124 for (i = 0; i < 5; i++, pce++)
2125 pce->data = _std_init_vector_sha1[i];
2126 /* write auth seg size */
2127 pce = cmdlistinfo->auth_seg_size;
2128 pce->data = creq->cryptlen + areq->assoclen + crypto_aead_ivsize(aead);
2129
	/* write auth seg start */
2131 pce = cmdlistinfo->auth_seg_start;
2132 pce->data = 0;
2133
2134 return 0;
2135}
2136
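/*
 * Worked example for the key conversion above: the byte stream is packed
 * MSB-first into 32-bit words, so an authkey starting 0x01 0x02 0x03 0x04
 * becomes mackey32[0] == 0x01020304, the byte order the CRYPTO_AUTH_KEYn
 * registers are programmed with here.
 */
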
2137int qce_aead_req(void *handle, struct qce_req *q_req)
2138{
2139 struct qce_device *pce_dev = (struct qce_device *) handle;
2140 struct aead_request *areq = (struct aead_request *) q_req->areq;
2141 uint32_t authsize = q_req->authsize;
2142 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
2143 uint32_t ivsize = 0;
2144 uint32_t totallen_in, out_len;
2145 uint32_t hw_pad_out = 0;
2146 int rc = 0;
2147 int ce_burst_size;
2148 struct qce_cmdlist_info *cmdlistinfo = NULL;
	/* non-CCM requests use the SHA1-HMAC list built for AEAD */
	struct qce_cmdlist_info *auth_cmdlistinfo =
			&pce_dev->ce_sps.cmdlistptr.aead_sha1_hmac;
2150
2151 if (q_req->mode != QCE_MODE_CCM)
2152 ivsize = crypto_aead_ivsize(aead);
2153
2154 ce_burst_size = pce_dev->ce_sps.ce_burst_size;
2155 if (q_req->dir == QCE_ENCRYPT) {
2156 q_req->cryptlen = areq->cryptlen;
2157 totallen_in = q_req->cryptlen + areq->assoclen + ivsize;
2158 if (q_req->mode == QCE_MODE_CCM) {
2159 out_len = areq->cryptlen + authsize;
2160 hw_pad_out = ALIGN(authsize, ce_burst_size) - authsize;
2161 } else {
2162 out_len = areq->cryptlen;
2163 }
2164 } else {
2165 q_req->cryptlen = areq->cryptlen - authsize;
2166 if (q_req->mode == QCE_MODE_CCM)
2167 totallen_in = areq->cryptlen + areq->assoclen;
2168 else
2169 totallen_in = q_req->cryptlen + areq->assoclen + ivsize;
2170 out_len = q_req->cryptlen;
2171 hw_pad_out = authsize;
2172 }
2173
2174 pce_dev->assoc_nents = count_sg(areq->assoc, areq->assoclen);
2175 pce_dev->src_nents = count_sg(areq->src, areq->cryptlen);
2176 pce_dev->ivsize = q_req->ivsize;
2177 pce_dev->authsize = q_req->authsize;
2178 pce_dev->phy_iv_in = 0;
2179
2180 /* associated data input */
2181 dma_map_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
2182 DMA_TO_DEVICE);
2183 /* cipher input */
2184 dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
2185 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
2186 DMA_TO_DEVICE);
	/* cipher output (+ mac for encryption) */
2188 if (areq->src != areq->dst) {
2189 pce_dev->dst_nents = count_sg(areq->dst, out_len);
2190 dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
2191 DMA_FROM_DEVICE);
2192 } else {
2193 pce_dev->dst_nents = pce_dev->src_nents;
2194 }
2195
2196 _ce_get_cipher_cmdlistinfo(pce_dev, q_req, &cmdlistinfo);
2197 /* set up crypto device */
2198 rc = _ce_setup_cipher(pce_dev, q_req, totallen_in,
2199 areq->assoclen + ivsize, cmdlistinfo);
2200 if (rc < 0)
2201 goto bad;
2202
2203 if (q_req->mode != QCE_MODE_CCM) {
2204 rc = qce_aead_sha1_hmac_setup(q_req, aead, auth_cmdlistinfo);
2205 if (rc < 0)
2206 goto bad;
2207 /* overwrite seg size */
2208 cmdlistinfo->seg_size->data = totallen_in;
2209 /* cipher iv for input */
2210 pce_dev->phy_iv_in = dma_map_single(pce_dev->pdev, q_req->iv,
2211 ivsize, DMA_TO_DEVICE);
2212 }
2213
2214 /* setup for callback, and issue command to bam */
2215 pce_dev->areq = q_req->areq;
2216 pce_dev->qce_cb = q_req->qce_cb;
2217
	/* Register a callback for the producer EOT (end of transfer) event. */
2219 pce_dev->ce_sps.producer.event.callback = _aead_sps_producer_callback;
2220 rc = sps_register_event(pce_dev->ce_sps.producer.pipe,
2221 &pce_dev->ce_sps.producer.event);
2222 if (rc) {
2223 pr_err("Producer callback registration failed rc = %d\n", rc);
2224 goto bad;
2225 }
2226
	/* Register a callback for the consumer EOT (end of transfer) event. */
2228 pce_dev->ce_sps.consumer.event.callback = _aead_sps_consumer_callback;
2229 rc = sps_register_event(pce_dev->ce_sps.consumer.pipe,
2230 &pce_dev->ce_sps.consumer.event);
2231 if (rc) {
2232 pr_err("Consumer callback registration failed rc = %d\n", rc);
2233 goto bad;
2234 }
2235
2236 _qce_sps_iovec_count_init(pce_dev);
2237
2238 _qce_sps_add_cmd(pce_dev, 0, cmdlistinfo,
2239 &pce_dev->ce_sps.in_transfer);
2240
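	/*
	 * Older cores (minor_version == 0) consume the whole source list,
	 * associated data and IV included, as a single aligned stream and
	 * produce it back unstripped; newer cores take assoc data, IV and
	 * payload as separate descriptors and route the bytes the caller
	 * does not want into the ignore buffer on the output side.
	 */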
2241 if (pce_dev->ce_sps.minor_version == 0) {
2242 _qce_sps_add_sg_data(pce_dev, areq->src, totallen_in,
2243 &pce_dev->ce_sps.in_transfer);
2244
2245 _qce_set_eot_flag(&pce_dev->ce_sps.in_transfer);
2246 _qce_sps_add_sg_data(pce_dev, areq->dst, out_len +
2247 areq->assoclen + hw_pad_out,
2248 &pce_dev->ce_sps.out_transfer);
2249 _qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
2250 CRYPTO_RESULT_DUMP_SIZE,
2251 &pce_dev->ce_sps.out_transfer);
2252 } else {
2253 _qce_sps_add_sg_data(pce_dev, areq->assoc, areq->assoclen,
2254 &pce_dev->ce_sps.in_transfer);
2255 _qce_sps_add_data((uint32_t)pce_dev->phy_iv_in, ivsize,
2256 &pce_dev->ce_sps.in_transfer);
2257 _qce_sps_add_sg_data(pce_dev, areq->src, areq->cryptlen,
2258 &pce_dev->ce_sps.in_transfer);
2259 _qce_set_eot_flag(&pce_dev->ce_sps.in_transfer);
2260
		/* Pass through to ignore associated data (+ IV, if applicable) */
2262 _qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.ignore_buffer),
2263 (ivsize + areq->assoclen),
2264 &pce_dev->ce_sps.out_transfer);
2265 _qce_sps_add_sg_data(pce_dev, areq->dst, out_len,
2266 &pce_dev->ce_sps.out_transfer);
2267 /* Pass through to ignore hw_pad (padding of the MAC data) */
2268 _qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.ignore_buffer),
2269 hw_pad_out, &pce_dev->ce_sps.out_transfer);
2270
2271 _qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
2272 CRYPTO_RESULT_DUMP_SIZE, &pce_dev->ce_sps.out_transfer);
2273 }
2274 rc = _qce_sps_transfer(pce_dev);
2275 if (rc)
2276 goto bad;
2277 return 0;
2278
2279bad:
2280 if (pce_dev->assoc_nents) {
2281 dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
2282 DMA_TO_DEVICE);
2283 }
2284 if (pce_dev->src_nents) {
2285 dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
2286 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
2287 DMA_TO_DEVICE);
2288 }
2289 if (areq->src != areq->dst) {
2290 dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
2291 DMA_FROM_DEVICE);
2292 }
2293 if (pce_dev->phy_iv_in) {
2294 dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
2295 ivsize, DMA_TO_DEVICE);
2296 }
2297
2298 return rc;
2299}
2300EXPORT_SYMBOL(qce_aead_req);
2301
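/*
 * Illustrative AEAD submission sketch (hypothetical client code; the
 * callback my_aead_done and the pre-filled aead_request are assumptions,
 * as is the choice of mode):
 *
 *	struct qce_req q_req = {0};
 *
 *	q_req.areq = areq;		// struct aead_request being serviced
 *	q_req.qce_cb = my_aead_done;	// completion callback
 *	q_req.dir = QCE_ENCRYPT;
 *	q_req.mode = QCE_MODE_CCM;	// or a cipher mode with SHA1-HMAC
 *	q_req.authsize = crypto_aead_authsize(aead);
 *	rc = qce_aead_req(handle, &q_req);
 *
 * A zero return only means the BAM transfer was queued; completion is
 * reported asynchronously through qce_cb.
 */
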
2302int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
2303{
2304 int rc = 0;
2305 struct qce_device *pce_dev = (struct qce_device *) handle;
2306 struct ablkcipher_request *areq = (struct ablkcipher_request *)
2307 c_req->areq;
2308 struct qce_cmdlist_info *cmdlistinfo = NULL;
2309
2310 pce_dev->src_nents = 0;
2311 pce_dev->dst_nents = 0;
2312 _ce_get_cipher_cmdlistinfo(pce_dev, c_req, &cmdlistinfo);
2313
2314 /* cipher input */
2315 pce_dev->src_nents = count_sg(areq->src, areq->nbytes);
2316
2317 dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
2318 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
2319 DMA_TO_DEVICE);
2320 /* cipher output */
2321 if (areq->src != areq->dst) {
2322 pce_dev->dst_nents = count_sg(areq->dst, areq->nbytes);
2323 dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
2324 DMA_FROM_DEVICE);
2325 } else {
2326 pce_dev->dst_nents = pce_dev->src_nents;
2327 }
2328 /* set up crypto device */
2329 rc = _ce_setup_cipher(pce_dev, c_req, areq->nbytes, 0, cmdlistinfo);
2330 if (rc < 0)
2331 goto bad;
2332
2333 /* setup for client callback, and issue command to BAM */
2334 pce_dev->areq = areq;
2335 pce_dev->qce_cb = c_req->qce_cb;
2336
	/* Register a callback for the producer EOT (end of transfer) event. */
2338 pce_dev->ce_sps.producer.event.callback =
2339 _ablk_cipher_sps_producer_callback;
2340 rc = sps_register_event(pce_dev->ce_sps.producer.pipe,
2341 &pce_dev->ce_sps.producer.event);
2342 if (rc) {
2343 pr_err("Producer callback registration failed rc = %d\n", rc);
2344 goto bad;
2345 }
2346
	/* Register a callback for the consumer EOT (end of transfer) event. */
2348 pce_dev->ce_sps.consumer.event.callback =
2349 _ablk_cipher_sps_consumer_callback;
2350 rc = sps_register_event(pce_dev->ce_sps.consumer.pipe,
2351 &pce_dev->ce_sps.consumer.event);
2352 if (rc) {
2353 pr_err("Consumer callback registration failed rc = %d\n", rc);
2354 goto bad;
2355 }
2356
2357 _qce_sps_iovec_count_init(pce_dev);
2358
2359 _qce_sps_add_cmd(pce_dev, 0, cmdlistinfo,
2360 &pce_dev->ce_sps.in_transfer);
2361 _qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
2362 &pce_dev->ce_sps.in_transfer);
2363 _qce_set_eot_flag(&pce_dev->ce_sps.in_transfer);
2364
2365 _qce_sps_add_sg_data(pce_dev, areq->dst, areq->nbytes,
2366 &pce_dev->ce_sps.out_transfer);
2367 _qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
2368 CRYPTO_RESULT_DUMP_SIZE,
2369 &pce_dev->ce_sps.out_transfer);
2370 rc = _qce_sps_transfer(pce_dev);
2371 if (rc)
2372 goto bad;
2373 return 0;
2374bad:
2375 if (pce_dev->dst_nents) {
2376 dma_unmap_sg(pce_dev->pdev, areq->dst,
2377 pce_dev->dst_nents, DMA_FROM_DEVICE);
2378 }
2379 if (pce_dev->src_nents) {
2380 dma_unmap_sg(pce_dev->pdev, areq->src,
2381 pce_dev->src_nents,
2382 (areq->src == areq->dst) ?
2383 DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
2384 }
2385 return rc;
2386}
2387EXPORT_SYMBOL(qce_ablk_cipher_req);
2388
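/*
 * Descriptor chain queued by the cipher path above, in order (a summary
 * of the calls in qce_ablk_cipher_req, not new behaviour):
 *
 *	in pipe:  [command list][src sg ... EOT]
 *	out pipe: [dst sg][result dump]
 *
 * Completion is signalled through the producer pipe callback once the
 * output, ending with the result dump, has been written back.
 */
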
2389int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
2390{
2391 struct qce_device *pce_dev = (struct qce_device *) handle;
2392 int rc;
2393
2394 struct ahash_request *areq = (struct ahash_request *)sreq->areq;
2395 struct qce_cmdlist_info *cmdlistinfo = NULL;
2396
2397 pce_dev->src_nents = count_sg(sreq->src, sreq->size);
2398 _ce_get_hash_cmdlistinfo(pce_dev, sreq, &cmdlistinfo);
2399 dma_map_sg(pce_dev->pdev, sreq->src, pce_dev->src_nents,
2400 DMA_TO_DEVICE);
2401 rc = _ce_setup_hash(pce_dev, sreq, cmdlistinfo);
2402 if (rc < 0)
2403 goto bad;
2404
2405 pce_dev->areq = areq;
2406 pce_dev->qce_cb = sreq->qce_cb;
2407
	/* Register a callback for the producer EOT (end of transfer) event. */
2409 pce_dev->ce_sps.producer.event.callback = _sha_sps_producer_callback;
2410 rc = sps_register_event(pce_dev->ce_sps.producer.pipe,
2411 &pce_dev->ce_sps.producer.event);
2412 if (rc) {
2413 pr_err("Producer callback registration failed rc = %d\n", rc);
2414 goto bad;
2415 }
2416
	/* Register a callback for the consumer EOT (end of transfer) event. */
2418 pce_dev->ce_sps.consumer.event.callback = _sha_sps_consumer_callback;
2419 rc = sps_register_event(pce_dev->ce_sps.consumer.pipe,
2420 &pce_dev->ce_sps.consumer.event);
2421 if (rc) {
2422 pr_err("Consumer callback registration failed rc = %d\n", rc);
2423 goto bad;
2424 }
2425
2426 _qce_sps_iovec_count_init(pce_dev);
2427
2428 _qce_sps_add_cmd(pce_dev, 0, cmdlistinfo,
2429 &pce_dev->ce_sps.in_transfer);
2430 _qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
2431 &pce_dev->ce_sps.in_transfer);
2432 _qce_set_eot_flag(&pce_dev->ce_sps.in_transfer);
2433
2434 _qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
2435 CRYPTO_RESULT_DUMP_SIZE,
2436 &pce_dev->ce_sps.out_transfer);
2437 rc = _qce_sps_transfer(pce_dev);
2438 if (rc)
2439 goto bad;
2440 return 0;
2441bad:
2442 if (pce_dev->src_nents) {
2443 dma_unmap_sg(pce_dev->pdev, sreq->src,
2444 pce_dev->src_nents, DMA_TO_DEVICE);
2445 }
2446 return rc;
2447}
2448EXPORT_SYMBOL(qce_process_sha_req);
2449
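/*
 * Minimal hash submission sketch (hypothetical client code; my_sha_done
 * and the field assignments are assumptions based on how qce_sha_req is
 * consumed above):
 *
 *	struct qce_sha_req sreq = {0};
 *
 *	sreq.areq = ahash_req;		// struct ahash_request
 *	sreq.qce_cb = my_sha_done;
 *	sreq.src = ahash_req->src;
 *	sreq.size = ahash_req->nbytes;
 *	rc = qce_process_sha_req(handle, &sreq);
 */
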
2450static int __qce_get_device_tree_data(struct platform_device *pdev,
2451 struct qce_device *pce_dev)
2452{
2453 struct resource *resource;
2454 int rc = 0;
2455
	if (of_property_read_u32(pdev->dev.of_node,
				"qcom,bam-pipe-pair",
				&pce_dev->ce_sps.pipe_pair_index)) {
		pr_err("Failed to get BAM pipe pair information.\n");
		return -EINVAL;
	}
	pr_warn("bam_pipe_pair=0x%x\n", pce_dev->ce_sps.pipe_pair_index);
2464 pce_dev->ce_sps.dest_pipe_index = 2 * pce_dev->ce_sps.pipe_pair_index;
2465 pce_dev->ce_sps.src_pipe_index = pce_dev->ce_sps.dest_pipe_index + 1;
2466
2467 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2468 "crypto-base");
2469 if (resource) {
2470 pce_dev->phy_iobase = resource->start;
2471 pce_dev->iobase = ioremap_nocache(resource->start,
2472 resource_size(resource));
2473 if (!pce_dev->iobase) {
2474 pr_err("Can not map CRYPTO io memory\n");
2475 return -ENOMEM;
2476 }
2477 } else {
2478 pr_err("CRYPTO HW mem unavailable.\n");
2479 return -ENODEV;
2480 }
2481 pr_warn("ce_phy_reg_base=0x%x ", pce_dev->phy_iobase);
2482 pr_warn("ce_virt_reg_base=0x%x\n", (uint32_t)pce_dev->iobase);
2483
2484 resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2485 "crypto-bam-base");
2486 if (resource) {
2487 pce_dev->ce_sps.bam_mem = resource->start;
2488 pce_dev->ce_sps.bam_iobase = ioremap_nocache(resource->start,
2489 resource_size(resource));
		if (!pce_dev->ce_sps.bam_iobase) {
2491 rc = -ENOMEM;
2492 pr_err("Can not map BAM io memory\n");
2493 goto err_getting_bam_info;
2494 }
2495 } else {
2496 pr_err("CRYPTO BAM mem unavailable.\n");
2497 rc = -ENODEV;
2498 goto err_getting_bam_info;
2499 }
2500 pr_warn("ce_bam_phy_reg_base=0x%x ", pce_dev->ce_sps.bam_mem);
2501 pr_warn("ce_bam_virt_reg_base=0x%x\n",
2502 (uint32_t)pce_dev->ce_sps.bam_iobase);
2503
2504 resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2505 if (resource) {
2506 pce_dev->ce_sps.bam_irq = resource->start;
2507 pr_warn("CRYPTO BAM IRQ = %d.\n", pce_dev->ce_sps.bam_irq);
2508 } else {
2509 pr_err("CRYPTO BAM IRQ unavailable.\n");
		rc = -ENXIO;
		goto err_dev;
2511 }
2512 return rc;
2513err_dev:
2514 if (pce_dev->ce_sps.bam_iobase)
2515 iounmap(pce_dev->ce_sps.bam_iobase);
2516
2517err_getting_bam_info:
2518 if (pce_dev->iobase)
2519 iounmap(pce_dev->iobase);
2520
2521 return rc;
2522}
2523
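/*
 * Example of a device tree node satisfying the lookups above (the
 * compatible string and addresses are made up for illustration; only the
 * property and resource names are taken from this function):
 *
 *	qce@fd440000 {
 *		compatible = "qcom,qce";
 *		reg = <0xfd440000 0x20000>, <0xfd444000 0x8000>;
 *		reg-names = "crypto-base", "crypto-bam-base";
 *		interrupts = <0 235 0>;
 *		qcom,bam-pipe-pair = <1>;
 *	};
 */
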
2524static int __qce_init_clk(struct qce_device *pce_dev)
2525{
2526 int rc = 0;
2527 struct clk *ce_core_clk;
2528 struct clk *ce_clk;
2529 struct clk *ce_core_src_clk;
2530
	/* Get CE core src clk. */
2532 ce_core_src_clk = clk_get(pce_dev->pdev, "core_clk_src");
2533 if (!IS_ERR(ce_core_src_clk)) {
2534 pce_dev->ce_core_src_clk = ce_core_src_clk;
2535
		/* Set the core src clk to 100 MHz */
		rc = clk_set_rate(pce_dev->ce_core_src_clk, 100000000);
		if (rc) {
			clk_put(pce_dev->ce_core_src_clk);
			pr_err("Unable to set the core src clk to 100 MHz.\n");
2541 goto err_clk;
2542 }
2543 } else {
2544 pr_warn("Unable to get CE core src clk, set to NULL\n");
2545 pce_dev->ce_core_src_clk = NULL;
2546 }
2547
2548 /* Get CE core clk */
2549 ce_core_clk = clk_get(pce_dev->pdev, "core_clk");
2550 if (IS_ERR(ce_core_clk)) {
2551 rc = PTR_ERR(ce_core_clk);
2552 pr_err("Unable to get CE core clk\n");
2553 if (pce_dev->ce_core_src_clk != NULL)
2554 clk_put(pce_dev->ce_core_src_clk);
2555 goto err_clk;
2556 }
2557 pce_dev->ce_core_clk = ce_core_clk;
2558
2559 /* Get CE Interface clk */
2560 ce_clk = clk_get(pce_dev->pdev, "iface_clk");
2561 if (IS_ERR(ce_clk)) {
2562 rc = PTR_ERR(ce_clk);
2563 pr_err("Unable to get CE interface clk\n");
2564 if (pce_dev->ce_core_src_clk != NULL)
2565 clk_put(pce_dev->ce_core_src_clk);
2566 clk_put(pce_dev->ce_core_clk);
2567 goto err_clk;
2568 }
2569 pce_dev->ce_clk = ce_clk;
2570
2571 /* Enable CE core clk */
2572 rc = clk_prepare_enable(pce_dev->ce_core_clk);
2573 if (rc) {
2574 pr_err("Unable to enable/prepare CE core clk\n");
2575 if (pce_dev->ce_core_src_clk != NULL)
2576 clk_put(pce_dev->ce_core_src_clk);
2577 clk_put(pce_dev->ce_core_clk);
2578 clk_put(pce_dev->ce_clk);
2579 goto err_clk;
2580 } else {
2581 /* Enable CE clk */
2582 rc = clk_prepare_enable(pce_dev->ce_clk);
2583 if (rc) {
2584 pr_err("Unable to enable/prepare CE iface clk\n");
2585 clk_disable_unprepare(pce_dev->ce_core_clk);
2586 if (pce_dev->ce_core_src_clk != NULL)
2587 clk_put(pce_dev->ce_core_src_clk);
2588 clk_put(pce_dev->ce_core_clk);
2589 clk_put(pce_dev->ce_clk);
2590 goto err_clk;
2591 }
2592 }
2593err_clk:
2594 if (rc)
2595 pr_err("Unable to init CE clks, rc = %d\n", rc);
2596 return rc;
2597}
2598
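/*
 * Clocks requested above, by consumer name:
 *
 *	core_clk_src - optional parent, set to 100 MHz when present
 *	core_clk     - CE core clock, prepared and enabled
 *	iface_clk    - CE register interface clock, prepared and enabled
 *
 * A failure on core_clk or iface_clk is fatal; a missing core_clk_src
 * is only warned about.
 */
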
2599/* crypto engine open function. */
2600void *qce_open(struct platform_device *pdev, int *rc)
2601{
2602 struct qce_device *pce_dev;
2603
2604 pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
2605 if (!pce_dev) {
2606 *rc = -ENOMEM;
2607 pr_err("Can not allocate memory: %d\n", *rc);
2608 return NULL;
2609 }
2610 pce_dev->pdev = &pdev->dev;
2611
2612 if (pdev->dev.of_node) {
2613 *rc = __qce_get_device_tree_data(pdev, pce_dev);
2614 if (*rc)
2615 goto err_pce_dev;
2616 } else {
2617 *rc = -EINVAL;
2618 pr_err("Device Node not found.\n");
2619 goto err_pce_dev;
2620 }
2621
2622 pce_dev->memsize = 9 * PAGE_SIZE;
2623 pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
2624 pce_dev->memsize, &pce_dev->coh_pmem, GFP_KERNEL);
2625 if (pce_dev->coh_vmem == NULL) {
2626 *rc = -ENOMEM;
2627 pr_err("Can not allocate coherent memory for sps data\n");
2628 goto err_iobase;
2629 }
2630
2631 *rc = __qce_init_clk(pce_dev);
2632 if (*rc)
2633 goto err_mem;
2634
2635 if (_probe_ce_engine(pce_dev)) {
2636 *rc = -ENXIO;
2637 goto err;
2638 }
2639 *rc = 0;
2640 qce_setup_ce_sps_data(pce_dev);
2641 qce_sps_init(pce_dev);
2642
2643 return pce_dev;
2644err:
2645 clk_disable_unprepare(pce_dev->ce_clk);
2646 clk_disable_unprepare(pce_dev->ce_core_clk);
2647
2648 if (pce_dev->ce_core_src_clk != NULL)
2649 clk_put(pce_dev->ce_core_src_clk);
2650 clk_put(pce_dev->ce_clk);
2651 clk_put(pce_dev->ce_core_clk);
2652err_mem:
2653 if (pce_dev->coh_vmem)
2654 dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
2655 pce_dev->coh_vmem, pce_dev->coh_pmem);
2656err_iobase:
2657 if (pce_dev->ce_sps.bam_iobase)
2658 iounmap(pce_dev->ce_sps.bam_iobase);
2659 if (pce_dev->iobase)
2660 iounmap(pce_dev->iobase);
2661err_pce_dev:
2662 kfree(pce_dev);
2663 return NULL;
2664}
2665EXPORT_SYMBOL(qce_open);
2666
2667/* crypto engine close function. */
2668int qce_close(void *handle)
2669{
2670 struct qce_device *pce_dev = (struct qce_device *) handle;
2671
2672 if (handle == NULL)
2673 return -ENODEV;
2674
2675 if (pce_dev->iobase)
2676 iounmap(pce_dev->iobase);
2677 if (pce_dev->coh_vmem)
2678 dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
2679 pce_dev->coh_vmem, pce_dev->coh_pmem);
2680
2681 clk_disable_unprepare(pce_dev->ce_clk);
2682 clk_disable_unprepare(pce_dev->ce_core_clk);
2683 if (pce_dev->ce_core_src_clk != NULL)
2684 clk_put(pce_dev->ce_core_src_clk);
2685 clk_put(pce_dev->ce_clk);
2686 clk_put(pce_dev->ce_core_clk);
2687
	qce_sps_exit(pce_dev);
	if (pce_dev->ce_sps.bam_iobase)
		iounmap(pce_dev->ce_sps.bam_iobase);
	kfree(handle);
2690
2691 return 0;
2692}
2693EXPORT_SYMBOL(qce_close);
2694
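/*
 * Typical engine lifecycle from a client driver, as a sketch (error
 * handling trimmed; pdev is the client's own platform device):
 *
 *	int rc;
 *	void *handle = qce_open(pdev, &rc);
 *
 *	if (handle == NULL)
 *		return rc;
 *	// ... submit qce_aead_req()/qce_ablk_cipher_req()/
 *	//     qce_process_sha_req() calls, one at a time per engine ...
 *	qce_close(handle);
 */
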
2695int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
2696{
2697 struct qce_device *pce_dev = (struct qce_device *)handle;
2698
2699 if (ce_support == NULL)
2700 return -EINVAL;
2701
2702 ce_support->sha1_hmac_20 = false;
2703 ce_support->sha1_hmac = false;
2704 ce_support->sha256_hmac = false;
2705 ce_support->sha_hmac = true;
2706 ce_support->cmac = true;
2707 ce_support->aes_key_192 = false;
2708 ce_support->aes_xts = true;
2709 ce_support->ota = false;
2710 ce_support->bam = true;
2711 if (pce_dev->ce_sps.minor_version) {
2712 ce_support->aligned_only = false;
2713 ce_support->aes_ccm = true;
2714 } else {
2715 ce_support->aligned_only = true;
2716 ce_support->aes_ccm = false;
2717 }
2718 return 0;
2719}
2720EXPORT_SYMBOL(qce_hw_support);
2721
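/*
 * Capability query sketch: clients are expected to branch on the feature
 * flags rather than on the core revision (register_ccm_algos() below is
 * a hypothetical follow-up):
 *
 *	struct ce_hw_support support;
 *
 *	if (qce_hw_support(handle, &support) == 0 && support.aes_ccm)
 *		register_ccm_algos();
 */
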
2722static int __init qce_init(void)
2723{
2724 bam_registry.handle = 0;
2725 bam_registry.cnt = 0;
2726 return 0;
2727}
2728
2729static void __exit qce_exit(void)
2730{
2731 bam_registry.handle = 0;
2732 bam_registry.cnt = 0;
2733}
2734
2735module_init(qce_init);
2736module_exit(qce_exit);
2737
2738MODULE_LICENSE("GPL v2");
2739MODULE_DESCRIPTION("Crypto Engine driver");