/* Qualcomm Crypto Engine driver.
 *
 * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <mach/dma.h>
#include <mach/clk.h>
#include "inc/qce.h"
#include "inc/qcedev.h"
#include "inc/qcryptohw_40.h"

/* ADM definitions */
#define LI_SG_CMD		(1 << 31)	/* last index in the scatter gather cmd */
#define SRC_INDEX_SG_CMD(index)	((index & 0x3fff) << 16)
#define DST_INDEX_SG_CMD(index)	(index & 0x3fff)
#define ADM_DESC_LAST		(1 << 31)

/* Data transfer between DM and CE is in blocks of 16 bytes */
#define ADM_CE_BLOCK_SIZE	16

#define ADM_DESC_LENGTH_MASK	0xffff
#define ADM_DESC_LENGTH(x)	(x & ADM_DESC_LENGTH_MASK)

struct dmov_desc {
	uint32_t addr;
	uint32_t len;
};

#define ADM_STATUS_OK		0x80000002

/* Misc definitions */

/* QCE max number of descriptors in a descriptor list */
#define QCE_MAX_NUM_DESC	128

/* State of DM channel */
enum qce_chan_st_enum {
	QCE_CHAN_STATE_IDLE = 0,
	QCE_CHAN_STATE_IN_PROG = 1,
	QCE_CHAN_STATE_COMP = 2,
	QCE_CHAN_STATE_LAST
};
66
67/*
68 * CE HW device structure.
69 * Each engine has an instance of the structure.
70 * Each engine can only handle one crypto operation at one time. It is up to
71 * the sw above to ensure single threading of operation on an engine.
72 */
73struct qce_device {
74 struct device *pdev; /* Handle to platform_device structure */
75 unsigned char *coh_vmem; /* Allocated coherent virtual memory */
76 dma_addr_t coh_pmem; /* Allocated coherent physical memory */
77 void __iomem *iobase; /* Virtual io base of CE HW */
78 unsigned int phy_iobase; /* Physical io base of CE HW */
79 struct clk *ce_core_clk; /* Handle to CE clk */
80 struct clk *ce_clk; /* Handle to CE clk */
81 unsigned int crci_in; /* CRCI for CE DM IN Channel */
82 unsigned int crci_out; /* CRCI for CE DM OUT Channel */
83 unsigned int chan_ce_in; /* ADM channel used for CE input
84 * and auth result if authentication
85 * only operation. */
86 unsigned int chan_ce_out; /* ADM channel used for CE output,
87 and icv for esp */
88 unsigned int *cmd_pointer_list_ce_in;
89 dma_addr_t phy_cmd_pointer_list_ce_in;
90
91 unsigned int *cmd_pointer_list_ce_out;
92 dma_addr_t phy_cmd_pointer_list_ce_out;
93
94 unsigned char *cmd_list_ce_in;
95 dma_addr_t phy_cmd_list_ce_in;
96
97 unsigned char *cmd_list_ce_out;
98 dma_addr_t phy_cmd_list_ce_out;
99
100 struct dmov_desc *ce_out_src_desc;
101 dma_addr_t phy_ce_out_src_desc;
102
103 struct dmov_desc *ce_out_dst_desc;
104 dma_addr_t phy_ce_out_dst_desc;
105
106 struct dmov_desc *ce_in_src_desc;
107 dma_addr_t phy_ce_in_src_desc;
108
109 struct dmov_desc *ce_in_dst_desc;
110 dma_addr_t phy_ce_in_dst_desc;
111
112 unsigned char *ce_out_ignore;
113 dma_addr_t phy_ce_out_ignore;
114
115 unsigned char *ce_pad;
116 dma_addr_t phy_ce_pad;
117
118 struct msm_dmov_cmd *chan_ce_in_cmd;
119 struct msm_dmov_cmd *chan_ce_out_cmd;
120
121 uint32_t ce_out_ignore_size;
122
123 int ce_out_dst_desc_index;
124 int ce_in_dst_desc_index;
125
126 int ce_out_src_desc_index;
127 int ce_in_src_desc_index;
128
129 enum qce_chan_st_enum chan_ce_in_state; /* chan ce_in state */
130 enum qce_chan_st_enum chan_ce_out_state; /* chan ce_out state */
131
132 int chan_ce_in_status; /* chan ce_in status */
133 int chan_ce_out_status; /* chan ce_out status */
134
135 unsigned char *dig_result;
136 dma_addr_t phy_dig_result;
137
138 /* cached aes key */
139 uint32_t cipher_key[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)];
140
141 uint32_t cipher_key_size; /* cached aes key size in bytes */
142 qce_comp_func_ptr_t qce_cb; /* qce callback function pointer */
143
144 int assoc_nents;
145 int ivsize;
146 int authsize;
147 int src_nents;
148 int dst_nents;
149
150 void *areq;
151 enum qce_cipher_mode_enum mode;
152
153 dma_addr_t phy_iv_in;
154};
155
/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
static uint32_t _std_init_vector_sha1[] = {
	0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
};
/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint32_t _std_init_vector_sha256[] = {
	0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
	0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
};

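/*
 * Pack a byte stream into an array of 32-bit words in network (big-endian)
 * byte order, as the CE register interface expects. A trailing partial word
 * of 1-3 bytes is left-justified into the final word.
 */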
static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned n;

	n = len / sizeof(uint32_t);
	for (; n > 0; n--) {
		*iv = ((*b << 24) & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000) |
				(((*(b+2)) << 8) & 0xff00) |
				(*(b+3) & 0xff);
		b += sizeof(uint32_t);
		iv++;
	}

	n = len % sizeof(uint32_t);
	if (n == 3) {
		*iv = ((*b << 24) & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000) |
				(((*(b+2)) << 8) & 0xff00);
	} else if (n == 2) {
		*iv = ((*b << 24) & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000);
	} else if (n == 1) {
		*iv = ((*b << 24) & 0xff000000);
	}
}

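/*
 * Reverse the byte order of an IV (in this driver, only used for the XTS
 * tweak), zero-padding short IVs at the front, then pack the result into
 * network-order words.
 */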
static void _byte_stream_swap_to_net_words(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned i, j;
	unsigned char swap_iv[AES_IV_LENGTH];

	memset(swap_iv, 0, AES_IV_LENGTH);
	for (i = (AES_IV_LENGTH-len), j = len-1; i < AES_IV_LENGTH; i++, j--)
		swap_iv[i] = b[j];
	_byte_stream_to_net_words(iv, swap_iv, AES_IV_LENGTH);
}

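/*
 * Inverse of _byte_stream_to_net_words(): unpack an array of network-order
 * 32-bit words back into a byte stream, including a trailing partial word.
 */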
static void _net_words_to_byte_stream(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned n = len / sizeof(uint32_t);

	for (; n > 0; n--) {
		*b++ = (unsigned char) ((*iv >> 24) & 0xff);
		*b++ = (unsigned char) ((*iv >> 16) & 0xff);
		*b++ = (unsigned char) ((*iv >> 8) & 0xff);
		*b++ = (unsigned char) (*iv & 0xff);
		iv++;
	}
	n = len % sizeof(uint32_t);
	if (n == 3) {
		*b++ = (unsigned char) ((*iv >> 24) & 0xff);
		*b++ = (unsigned char) ((*iv >> 16) & 0xff);
		*b = (unsigned char) ((*iv >> 8) & 0xff);
	} else if (n == 2) {
		*b++ = (unsigned char) ((*iv >> 24) & 0xff);
		*b = (unsigned char) ((*iv >> 16) & 0xff);
	} else if (n == 1) {
		*b = (unsigned char) ((*iv >> 24) & 0xff);
	}
}

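/* Count how many scatterlist entries are needed to cover nbytes of data. */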
static int count_sg(struct scatterlist *sg, int nbytes)
{
	int i;

	for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
		nbytes -= sg->length;
	return i;
}

static int dma_map_pmem_sg(struct buf_info *pmem, unsigned entries,
		struct scatterlist *sg)
{
	int i;

	for (i = 0; i < entries; i++) {
		sg->dma_address = (dma_addr_t)pmem->offset;
		sg++;
		pmem++;
	}
	return 0;
}

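/*
 * Read the CE version register and verify this is a crypto core revision
 * (4.0 or 4.2) that the driver knows how to drive.
 */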
static int _probe_ce_engine(struct qce_device *pce_dev)
{
	unsigned int val;
	unsigned int rev;

	val = readl_relaxed(pce_dev->iobase + CRYPTO_VERSION_REG);
	if (((val & 0xfffffff) != 0x0000042) &&
			((val & 0xfffffff) != 0x0000040)) {
		dev_err(pce_dev->pdev,
				"Unknown Qualcomm crypto device at 0x%x 0x%x\n",
				pce_dev->phy_iobase, val);
		return -EIO;
	}
	rev = (val & CRYPTO_CORE_REV_MASK);
	if (rev == 0x42)
		dev_info(pce_dev->pdev,
				"Qualcomm Crypto 4.2 device found at 0x%x\n",
				pce_dev->phy_iobase);
	else if (rev == 0x40)
		dev_info(pce_dev->pdev,
				"Qualcomm Crypto 4.0 device found at 0x%x\n",
				pce_dev->phy_iobase);

	dev_info(pce_dev->pdev,
			"IO base 0x%x, ce_in channel %d, "
			"ce_out channel %d, "
			"crci_in %d, crci_out %d\n",
			(unsigned int) pce_dev->iobase,
			pce_dev->chan_ce_in, pce_dev->chan_ce_out,
			pce_dev->crci_in, pce_dev->crci_out);

	pce_dev->cipher_key_size = 0;

	return 0;
}

static int _init_ce_engine(struct qce_device *pce_dev)
{
	unsigned int val;

	/* Reset ce */
	clk_reset(pce_dev->ce_core_clk, CLK_RESET_ASSERT);
	clk_reset(pce_dev->ce_core_clk, CLK_RESET_DEASSERT);
	/*
	 * Ensure the previous writes to the CLK registers, which toggle
	 * the CLK reset lines, have completed.
	 */
	dsb();
	/* configure ce */
	val = (1 << CRYPTO_MASK_DOUT_INTR) | (1 << CRYPTO_MASK_DIN_INTR) |
			(1 << CRYPTO_MASK_OP_DONE_INTR) |
			(1 << CRYPTO_MASK_ERR_INTR);
	writel_relaxed(val, pce_dev->iobase + CRYPTO_CONFIG_REG);
	/*
	 * Ensure the previous write to the config register has completed
	 * before reading it back.
	 */
	dsb();
	val = readl_relaxed(pce_dev->iobase + CRYPTO_CONFIG_REG);
	if (!val) {
		dev_err(pce_dev->pdev,
				"unknown Qualcomm crypto device at 0x%x\n",
				pce_dev->phy_iobase);
		return -EIO;
	}
	if (_probe_ce_engine(pce_dev) < 0)
		return -EIO;
	return 0;
}

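/*
 * Program the CE for a hash operation: load the authentication key (for
 * CMAC/HMAC), seed the AUTH_IV registers with either the standard SHA
 * initialization vector (first block) or the intermediate digest, restore
 * the running byte counters, then set the segment sizes and fire the GO bit.
 */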
static int _ce_setup_hash(struct qce_device *pce_dev, struct qce_sha_req *sreq)
{
	uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
	uint32_t diglen;
	int i;
	uint32_t auth_cfg = 0;
	bool sha1 = false;

	if (sreq->alg == QCE_HASH_AES_CMAC) {
		uint32_t authkey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
				0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
		uint32_t authklen32 = sreq->authklen/(sizeof(uint32_t));

		/* Clear auth_ivn, auth_keyn registers */
		for (i = 0; i < 16; i++) {
			writel_relaxed(0, (pce_dev->iobase +
				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t))));
			writel_relaxed(0, (pce_dev->iobase +
				(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t))));
		}
		/* write auth_bytecnt 0/1/2/3, start with 0 */
		for (i = 0; i < 4; i++)
			writel_relaxed(0, pce_dev->iobase +
						CRYPTO_AUTH_BYTECNT0_REG +
						i * sizeof(uint32_t));

		_byte_stream_to_net_words(authkey32, sreq->authkey,
						sreq->authklen);
		for (i = 0; i < authklen32; i++)
			writel_relaxed(authkey32[i], pce_dev->iobase +
				CRYPTO_AUTH_KEY0_REG + (i * sizeof(uint32_t)));
		/* write seg_cfg */
		auth_cfg |= (1 << CRYPTO_LAST);
		auth_cfg |= (CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE);
		auth_cfg |= (CRYPTO_AUTH_SIZE_ENUM_16_BYTES <<
							CRYPTO_AUTH_SIZE);
		auth_cfg |= CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG;

		switch (sreq->authklen) {
		case AES128_KEY_SIZE:
			auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES128 <<
						CRYPTO_AUTH_KEY_SIZE);
			break;
		case AES256_KEY_SIZE:
			auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES256 <<
						CRYPTO_AUTH_KEY_SIZE);
			break;
		default:
			break;
		}

		goto go_proc;
	}

	/* if not the last block, the size must be a multiple of the block size */
	if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
		return -EIO;

	switch (sreq->alg) {
	case QCE_HASH_SHA1:
	case QCE_HASH_SHA1_HMAC:
		diglen = SHA1_DIGEST_SIZE;
		sha1 = true;
		break;
	case QCE_HASH_SHA256:
	case QCE_HASH_SHA256_HMAC:
		diglen = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
			(sreq->alg == QCE_HASH_SHA256_HMAC)) {
		uint32_t hmackey[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
				0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
		uint32_t hmacklen = sreq->authklen/(sizeof(uint32_t));

		_byte_stream_to_net_words(hmackey, sreq->authkey,
						sreq->authklen);
		/* write hmac key */
		for (i = 0; i < hmacklen; i++)
			writel_relaxed(hmackey[i], pce_dev->iobase +
				CRYPTO_AUTH_KEY0_REG + (i * sizeof(uint32_t)));

		auth_cfg |= (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE);
	} else {
		auth_cfg |= (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE);
	}

	/* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
	if (sreq->first_blk) {
		if (sha1) {
			for (i = 0; i < 5; i++)
				auth32[i] = _std_init_vector_sha1[i];
		} else {
			for (i = 0; i < 8; i++)
				auth32[i] = _std_init_vector_sha256[i];
		}
	} else {
		_byte_stream_to_net_words(auth32, sreq->digest, diglen);
	}

	for (i = 0; i < 5; i++)
		writel_relaxed(auth32[i], (pce_dev->iobase +
			(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t))));

	if ((sreq->alg == QCE_HASH_SHA256) ||
			(sreq->alg == QCE_HASH_SHA256_HMAC)) {
		writel_relaxed(auth32[5], pce_dev->iobase +
						CRYPTO_AUTH_IV5_REG);
		writel_relaxed(auth32[6], pce_dev->iobase +
						CRYPTO_AUTH_IV6_REG);
		writel_relaxed(auth32[7], pce_dev->iobase +
						CRYPTO_AUTH_IV7_REG);
	}

	/* write auth_bytecnt 0/1/2/3 with the running byte counts */
	for (i = 0; i < 4; i++)
		writel_relaxed(sreq->auth_data[i], (pce_dev->iobase +
			(CRYPTO_AUTH_BYTECNT0_REG + i * sizeof(uint32_t))));

	/* write seg_cfg */
	if (sha1)
		auth_cfg |= (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE);
	else
		auth_cfg |= (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE);

	if (sreq->last_blk)
		auth_cfg |= 1 << CRYPTO_LAST;

	auth_cfg |= CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG;

go_proc:
	auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);

	/* write seg_cfg */
	writel_relaxed(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);

	/* write seg_size */
	writel_relaxed(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	/* write auth_seg_size */
	writel_relaxed(sreq->size, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);

	/* write auth_seg_start */
	writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
	/*
	 * Ensure the previous writes to the AUTH registers have completed
	 * before accessing a register that is not in the same 1K range.
	 */
	dsb();

	writel_relaxed(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
	/*
	 * Ensure the previous writes setting up the CE registers have
	 * completed before writing to the GO register.
	 */
	dsb();
	/* issue go to crypto */
	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
	/*
	 * Ensure the write to the GO register has completed before
	 * issuing a DMA transfer request.
	 */
	dsb();

	return 0;
}

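/*
 * Program the CE for a cipher (or AEAD) operation: for CCM, load the auth
 * key and nonce and configure the CCM MAC; then select the cipher mode and
 * algorithm, load the IV/counter and key (reusing the cached AES key when
 * unchanged), set the segment sizes and offset, and fire the GO bit.
 */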
static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
		uint32_t totallen_in, uint32_t coffset)
{
	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
			0, 0, 0, 0};
	uint32_t enck_size_in_word = creq->encklen / sizeof(uint32_t);
	int aes_key_chg;
	int i;
	uint32_t encr_cfg = 0;
	uint32_t ivsize = creq->ivsize;

	if (creq->mode == QCE_MODE_XTS)
		_byte_stream_to_net_words(enckey32, creq->enckey,
						creq->encklen/2);
	else
		_byte_stream_to_net_words(enckey32, creq->enckey,
						creq->encklen);

	if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
		uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
		uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
		uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
		uint32_t auth_cfg = 0;

		/* Clear auth_ivn, auth_keyn registers */
		for (i = 0; i < 16; i++) {
			writel_relaxed(0, (pce_dev->iobase +
				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
			writel_relaxed(0, (pce_dev->iobase +
				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
		}
		/* write auth_bytecnt 0/1/2/3, start with 0 */
		for (i = 0; i < 4; i++)
			writel_relaxed(0, pce_dev->iobase +
						CRYPTO_AUTH_BYTECNT0_REG +
						i * sizeof(uint32_t));
		/* write auth key */
		for (i = 0; i < authklen32; i++)
			writel_relaxed(enckey32[i], pce_dev->iobase +
				CRYPTO_AUTH_KEY0_REG + (i*sizeof(uint32_t)));

		/* write nonce */
		_byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
		for (i = 0; i < noncelen32; i++)
			writel_relaxed(nonce32[i], pce_dev->iobase +
						CRYPTO_AUTH_INFO_NONCE0_REG +
						(i*sizeof(uint32_t)));

		auth_cfg |= (noncelen32 << CRYPTO_AUTH_NONCE_NUM_WORDS);
		auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
		auth_cfg |= (1 << CRYPTO_LAST);
		if (creq->dir == QCE_ENCRYPT)
			auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
		else
			auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
		auth_cfg |= (((creq->authsize >> 1) - 2) << CRYPTO_AUTH_SIZE);
		auth_cfg |= (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE);
		if (creq->authklen == AES128_KEY_SIZE)
			auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES128 <<
						CRYPTO_AUTH_KEY_SIZE);
		else if (creq->authklen == AES256_KEY_SIZE)
			auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES256 <<
						CRYPTO_AUTH_KEY_SIZE);
		auth_cfg |= (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG);
		writel_relaxed(auth_cfg, pce_dev->iobase +
						CRYPTO_AUTH_SEG_CFG_REG);
		if (creq->dir == QCE_ENCRYPT)
			writel_relaxed(totallen_in, pce_dev->iobase +
						CRYPTO_AUTH_SEG_SIZE_REG);
		else
			writel_relaxed((totallen_in - creq->authsize),
				pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
		writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
	} else {
		writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
	}
	/*
	 * Ensure the previous writes to the AUTH registers have completed
	 * before accessing a register that is not in the same 1K range.
	 */
	dsb();

	switch (creq->mode) {
	case QCE_MODE_ECB:
		encr_cfg |= (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
		break;

	case QCE_MODE_CBC:
		encr_cfg |= (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
		break;

	case QCE_MODE_XTS:
		encr_cfg |= (CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
		break;

	case QCE_MODE_CCM:
		encr_cfg |= (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE);
		break;

	case QCE_MODE_CTR:
	default:
		encr_cfg |= (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
		break;
	}
	pce_dev->mode = creq->mode;

	switch (creq->alg) {
	case CIPHER_ALG_DES:
		if (creq->mode != QCE_MODE_ECB) {
			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
			writel_relaxed(enciv32[0], pce_dev->iobase +
						CRYPTO_CNTR0_IV0_REG);
			writel_relaxed(enciv32[1], pce_dev->iobase +
						CRYPTO_CNTR1_IV1_REG);
		}
		writel_relaxed(enckey32[0], pce_dev->iobase +
						CRYPTO_ENCR_KEY0_REG);
		writel_relaxed(enckey32[1], pce_dev->iobase +
						CRYPTO_ENCR_KEY1_REG);
		encr_cfg |= ((CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
				(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
		break;

	case CIPHER_ALG_3DES:
		if (creq->mode != QCE_MODE_ECB) {
			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
			writel_relaxed(enciv32[0], pce_dev->iobase +
						CRYPTO_CNTR0_IV0_REG);
			writel_relaxed(enciv32[1], pce_dev->iobase +
						CRYPTO_CNTR1_IV1_REG);
		}
		/* load all six 3DES key words */
		for (i = 0; i < 6; i++)
			writel_relaxed(enckey32[i], (pce_dev->iobase +
				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t))));

		encr_cfg |= ((CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
				(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
		break;

	case CIPHER_ALG_AES:
	default:
		if (creq->mode == QCE_MODE_XTS) {
			uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
					= {0, 0, 0, 0, 0, 0, 0, 0};
			uint32_t xtsklen =
					creq->encklen/(2 * sizeof(uint32_t));

			_byte_stream_to_net_words(xtskey32, (creq->enckey +
					creq->encklen/2), creq->encklen/2);
			for (i = 0; i < xtsklen; i++)
				writel_relaxed(xtskey32[i], pce_dev->iobase +
						CRYPTO_ENCR_XTS_KEY0_REG +
						(i * sizeof(uint32_t)));

			writel_relaxed(creq->cryptlen,
					pce_dev->iobase +
					CRYPTO_ENCR_XTS_DU_SIZE_REG);
		}
		if (creq->mode != QCE_MODE_ECB) {
			if (creq->mode == QCE_MODE_XTS)
				_byte_stream_swap_to_net_words(enciv32,
							creq->iv, ivsize);
			else
				_byte_stream_to_net_words(enciv32, creq->iv,
							ivsize);
			for (i = 0; i <= 3; i++)
				writel_relaxed(enciv32[i], pce_dev->iobase +
						CRYPTO_CNTR0_IV0_REG +
						(i * sizeof(uint32_t)));
		}
		/* set number of counter bits */
		writel_relaxed(0xffffffff, pce_dev->iobase +
						CRYPTO_CNTR_MASK_REG);

		if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
			encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
						CRYPTO_ENCR_KEY_SZ);
			encr_cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
		} else {
			uint32_t key_size;

			if (creq->mode == QCE_MODE_XTS) {
				key_size = creq->encklen/2;
				enck_size_in_word = key_size/sizeof(uint32_t);
			} else {
				key_size = creq->encklen;
			}

			switch (key_size) {
			case AES128_KEY_SIZE:
				encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
							CRYPTO_ENCR_KEY_SZ);
				break;
			case AES256_KEY_SIZE:
			default:
				encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES256 <<
							CRYPTO_ENCR_KEY_SZ);

				/* check for null key. If null, use hw key */
				for (i = 0; i < enck_size_in_word; i++) {
					if (enckey32[i] != 0)
						break;
				}
				if (i == enck_size_in_word)
					encr_cfg |= 1 << CRYPTO_USE_HW_KEY;
				break;
			} /* end of switch (key_size) */

			encr_cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
			if (pce_dev->cipher_key_size != creq->encklen)
				aes_key_chg = 1;
			else {
				for (i = 0; i < enck_size_in_word; i++) {
					if (enckey32[i]
						!= pce_dev->cipher_key[i])
						break;
				}
				aes_key_chg = (i == enck_size_in_word) ? 0 : 1;
			}

			if (aes_key_chg) {
				for (i = 0; i < enck_size_in_word; i++)
					writel_relaxed(enckey32[i],
						pce_dev->iobase +
						CRYPTO_ENCR_KEY0_REG +
						(i * sizeof(uint32_t)));
				pce_dev->cipher_key_size = creq->encklen;
				for (i = 0; i < enck_size_in_word; i++)
					pce_dev->cipher_key[i] = enckey32[i];
			} /* if (aes_key_chg) */
		} /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
		break;
	} /* end of switch (creq->alg) */

	/* set the encode/decode direction */
	encr_cfg |= ((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;

	/* write encr seg cfg */
	writel_relaxed(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);

	/* write encr seg size */
	if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT))
		writel_relaxed((creq->cryptlen + creq->authsize),
			pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
	else
		writel_relaxed(creq->cryptlen,
			pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
	/* write encr seg start */
	writel_relaxed((coffset & 0xffff),
			pce_dev->iobase + CRYPTO_ENCR_SEG_START_REG);
	/* write seg size */
	writel_relaxed(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
	/*
	 * Ensure the previous writes setting up the CE registers have
	 * completed before writing to the GO register.
	 */
	dsb();
	/* issue go to crypto */
	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
	/*
	 * Ensure the write to the GO register has completed before
	 * issuing a DMA transfer request.
	 */
	dsb();
	return 0;
}

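/*
 * Completion handler for an AEAD request once both ADM channels are idle:
 * unmap the DMA buffers and report the digest (and, for CCM, the MAC
 * verification status from CRYPTO_STATUS_REG) back through the callback.
 */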
static int _aead_complete(struct qce_device *pce_dev)
{
	struct aead_request *areq;
	int i;
	uint32_t ivsize;
	uint32_t iv_out[4];
	unsigned char iv[4 * sizeof(uint32_t)];

	areq = (struct aead_request *) pce_dev->areq;
	ivsize = pce_dev->ivsize;

	if (areq->src != areq->dst) {
		dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
				DMA_FROM_DEVICE);
	}
	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
							DMA_TO_DEVICE);

	if (pce_dev->mode != QCE_MODE_CCM)
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
				ivsize, DMA_TO_DEVICE);
	dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
			DMA_TO_DEVICE);

	/* get iv out */
	if ((pce_dev->mode == QCE_MODE_ECB) ||
			(pce_dev->mode == QCE_MODE_CCM)) {
		if (pce_dev->mode == QCE_MODE_CCM) {
			int result;

			result = readl_relaxed(pce_dev->iobase +
						CRYPTO_STATUS_REG);
			result &= (1 << CRYPTO_MAC_FAILED);
			result |= (pce_dev->chan_ce_in_status |
						pce_dev->chan_ce_out_status);
			dsb();
			pce_dev->qce_cb(areq, pce_dev->dig_result, NULL,
					result);
		} else {
			pce_dev->qce_cb(areq, pce_dev->dig_result, NULL,
					pce_dev->chan_ce_in_status |
					pce_dev->chan_ce_out_status);
		}
	} else {
		for (i = 0; i < 4; i++)
			iv_out[i] = readl_relaxed(pce_dev->iobase +
				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)));

		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
		pce_dev->qce_cb(areq, pce_dev->dig_result, iv,
				pce_dev->chan_ce_in_status |
				pce_dev->chan_ce_out_status);
	}
	return 0;
}

static void _sha_complete(struct qce_device *pce_dev)
{
	struct ahash_request *areq;
	uint32_t auth_data[4];
	uint32_t digest[8];
	int i;

	areq = (struct ahash_request *) pce_dev->areq;
	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			DMA_TO_DEVICE);

	for (i = 0; i < 4; i++)
		auth_data[i] = readl_relaxed(pce_dev->iobase +
					(CRYPTO_AUTH_BYTECNT0_REG +
						i * sizeof(uint32_t)));

	for (i = 0; i < 8; i++)
		digest[i] = readl_relaxed(pce_dev->iobase +
			CRYPTO_AUTH_IV0_REG + (i * sizeof(uint32_t)));

	_net_words_to_byte_stream(digest, pce_dev->dig_result,
						SHA256_DIGEST_SIZE);

	pce_dev->qce_cb(areq, pce_dev->dig_result, (unsigned char *)auth_data,
				pce_dev->chan_ce_in_status);
}

static int _ablk_cipher_complete(struct qce_device *pce_dev)
{
	struct ablkcipher_request *areq;
	uint32_t iv_out[4];
	unsigned char iv[4 * sizeof(uint32_t)];

	areq = (struct ablkcipher_request *) pce_dev->areq;

	if (areq->src != areq->dst) {
		dma_unmap_sg(pce_dev->pdev, areq->dst,
			pce_dev->dst_nents, DMA_FROM_DEVICE);
	}
	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
		(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
						DMA_TO_DEVICE);
	/* get iv out */
	if (pce_dev->mode == QCE_MODE_ECB) {
		pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status |
					pce_dev->chan_ce_out_status);
	} else {
		int i;

		for (i = 0; i < 4; i++)
			iv_out[i] = readl_relaxed(pce_dev->iobase +
				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)));

		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
		pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status |
					pce_dev->chan_ce_out_status);
	}

	return 0;
}

static int _ablk_cipher_use_pmem_complete(struct qce_device *pce_dev)
{
	struct ablkcipher_request *areq;
	uint32_t iv_out[4];
	unsigned char iv[4 * sizeof(uint32_t)];

	areq = (struct ablkcipher_request *) pce_dev->areq;

	/* get iv out */
	if (pce_dev->mode == QCE_MODE_ECB) {
		pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status |
					pce_dev->chan_ce_out_status);
	} else {
		int i;

		for (i = 0; i < 4; i++)
			iv_out[i] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR0_IV0_REG + (i * sizeof(uint32_t)));

		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
		pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status |
					pce_dev->chan_ce_out_status);
	}

	return 0;
}

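/*
 * Split a transfer that exceeds 0x8000 bytes into multiple ADM descriptors,
 * since a single descriptor length here covers at most 0x8000 bytes.
 * Advances *index for each extra descriptor consumed; a paddr of 0 leaves
 * the descriptor addresses untouched (used for the device-side shadow
 * descriptors, whose addresses are fixed).
 */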
static int qce_split_and_insert_dm_desc(struct dmov_desc *pdesc,
		unsigned int plen, unsigned int paddr, int *index)
{
	while (plen > 0x8000) {
		pdesc->len = 0x8000;
		if (paddr > 0) {
			pdesc->addr = paddr;
			paddr += 0x8000;
		}
		plen -= pdesc->len;
		if (plen > 0) {
			*index = (*index) + 1;
			if ((*index) >= QCE_MAX_NUM_DESC)
				return -ENOMEM;
			pdesc++;
		}
	}
	if ((plen > 0) && (plen <= 0x8000)) {
		pdesc->len = plen;
		if (paddr > 0)
			pdesc->addr = paddr;
	}

	return 0;
}

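/*
 * Append a scatterlist to the ce_in channel descriptors: the destination
 * side is the fixed CE input FIFO (split only by length), while the source
 * side coalesces physically contiguous chunks into a single descriptor and
 * splits any run that exceeds 0x8000 bytes.
 */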
static int _chain_sg_buffer_in(struct qce_device *pce_dev,
		struct scatterlist *sg, unsigned int nbytes)
{
	unsigned int len;
	unsigned int dlen;
	struct dmov_desc *pdesc;

	pdesc = pce_dev->ce_in_dst_desc + pce_dev->ce_in_dst_desc_index;
	if (nbytes > 0x8000)
		qce_split_and_insert_dm_desc(pdesc, nbytes, 0,
				&pce_dev->ce_in_dst_desc_index);
	else
		pdesc->len = nbytes;

	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
	/*
	 * Two consecutive chunks may be handled by the same
	 * buffer descriptor.
	 */
	while (nbytes > 0) {
		len = min(nbytes, sg_dma_len(sg));
		dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
		nbytes -= len;
		if (dlen == 0) {
			pdesc->addr = sg_dma_address(sg);
			pdesc->len = len;
			if (pdesc->len > 0x8000)
				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
					sg_dma_address(sg),
					&pce_dev->ce_in_src_desc_index);
		} else if (sg_dma_address(sg) == (pdesc->addr + dlen)) {
			pdesc->len = dlen + len;
			if (pdesc->len > 0x8000)
				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
					pdesc->addr,
					&pce_dev->ce_in_src_desc_index);
		} else {
			pce_dev->ce_in_src_desc_index++;
			if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
				return -ENOMEM;
			pdesc++;
			pdesc->len = len;
			pdesc->addr = sg_dma_address(sg);
			if (pdesc->len > 0x8000)
				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
					sg_dma_address(sg),
					&pce_dev->ce_in_src_desc_index);
		}
		if (nbytes > 0)
			sg = sg_next(sg);
	}
	return 0;
}

static int _chain_pm_buffer_in(struct qce_device *pce_dev,
		unsigned int pmem, unsigned int nbytes)
{
	unsigned int dlen;
	struct dmov_desc *pdesc;

	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
	dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
	if (dlen == 0) {
		pdesc->addr = pmem;
		pdesc->len = nbytes;
	} else if (pmem == (pdesc->addr + dlen)) {
		pdesc->len = dlen + nbytes;
	} else {
		pce_dev->ce_in_src_desc_index++;
		if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
			return -ENOMEM;
		pdesc++;
		pdesc->len = nbytes;
		pdesc->addr = pmem;
	}
	pdesc = pce_dev->ce_in_dst_desc + pce_dev->ce_in_dst_desc_index;
	pdesc->len += nbytes;

	return 0;
}

static void _chain_buffer_in_init(struct qce_device *pce_dev)
{
	struct dmov_desc *pdesc;

	pce_dev->ce_in_src_desc_index = 0;
	pce_dev->ce_in_dst_desc_index = 0;
	pdesc = pce_dev->ce_in_src_desc;
	pdesc->len = 0;
}

static void _ce_in_final(struct qce_device *pce_dev, unsigned total)
{
	struct dmov_desc *pdesc;
	dmov_sg *pcmd;

	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
	pdesc->len |= ADM_DESC_LAST;
	pdesc = pce_dev->ce_in_dst_desc + pce_dev->ce_in_dst_desc_index;
	pdesc->len |= ADM_DESC_LAST;

	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in;
	pcmd->cmd |= CMD_LC;
}

#ifdef QCE_DEBUG
static void _ce_in_dump(struct qce_device *pce_dev)
{
	int i;
	struct dmov_desc *pdesc;

	dev_info(pce_dev->pdev, "_ce_in_dump: src\n");
	for (i = 0; i <= pce_dev->ce_in_src_desc_index; i++) {
		pdesc = pce_dev->ce_in_src_desc + i;
		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
				pdesc->len);
	}
	dev_info(pce_dev->pdev, "_ce_in_dump: dst\n");
	for (i = 0; i <= pce_dev->ce_in_dst_desc_index; i++) {
		pdesc = pce_dev->ce_in_dst_desc + i;
		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
				pdesc->len);
	}
}

static void _ce_out_dump(struct qce_device *pce_dev)
{
	int i;
	struct dmov_desc *pdesc;

	dev_info(pce_dev->pdev, "_ce_out_dump: src\n");
	for (i = 0; i <= pce_dev->ce_out_src_desc_index; i++) {
		pdesc = pce_dev->ce_out_src_desc + i;
		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
				pdesc->len);
	}

	dev_info(pce_dev->pdev, "_ce_out_dump: dst\n");
	for (i = 0; i <= pce_dev->ce_out_dst_desc_index; i++) {
		pdesc = pce_dev->ce_out_dst_desc + i;
		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
				pdesc->len);
	}
}

#else

static void _ce_in_dump(struct qce_device *pce_dev)
{
}

static void _ce_out_dump(struct qce_device *pce_dev)
{
}

#endif

static int _chain_sg_buffer_out(struct qce_device *pce_dev,
		struct scatterlist *sg, unsigned int nbytes)
{
	unsigned int len;
	unsigned int dlen;
	struct dmov_desc *pdesc;

	pdesc = pce_dev->ce_out_src_desc + pce_dev->ce_out_src_desc_index;
	if (nbytes > 0x8000)
		qce_split_and_insert_dm_desc(pdesc, nbytes, 0,
				&pce_dev->ce_out_src_desc_index);
	else
		pdesc->len = nbytes;

	pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
	/*
	 * Two consecutive chunks may be handled by the same
	 * buffer descriptor.
	 */
	while (nbytes > 0) {
		len = min(nbytes, sg_dma_len(sg));
		dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
		nbytes -= len;
		if (dlen == 0) {
			pdesc->addr = sg_dma_address(sg);
			pdesc->len = len;
			if (pdesc->len > 0x8000)
				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
					sg_dma_address(sg),
					&pce_dev->ce_out_dst_desc_index);
		} else if (sg_dma_address(sg) == (pdesc->addr + dlen)) {
			pdesc->len = dlen + len;
			if (pdesc->len > 0x8000)
				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
					pdesc->addr,
					&pce_dev->ce_out_dst_desc_index);
		} else {
			pce_dev->ce_out_dst_desc_index++;
			if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
				return -EIO;
			pdesc++;
			pdesc->len = len;
			pdesc->addr = sg_dma_address(sg);
			if (pdesc->len > 0x8000)
				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
					sg_dma_address(sg),
					&pce_dev->ce_out_dst_desc_index);
		}
		if (nbytes > 0)
			sg = sg_next(sg);
	}
	return 0;
}

static int _chain_pm_buffer_out(struct qce_device *pce_dev,
		unsigned int pmem, unsigned int nbytes)
{
	unsigned int dlen;
	struct dmov_desc *pdesc;

	pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
	dlen = pdesc->len & ADM_DESC_LENGTH_MASK;

	if (dlen == 0) {
		pdesc->addr = pmem;
		pdesc->len = nbytes;
	} else if (pmem == (pdesc->addr + dlen)) {
		pdesc->len = dlen + nbytes;
	} else {
		pce_dev->ce_out_dst_desc_index++;
		if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
			return -EIO;
		pdesc++;
		pdesc->len = nbytes;
		pdesc->addr = pmem;
	}
	pdesc = pce_dev->ce_out_src_desc + pce_dev->ce_out_src_desc_index;
	pdesc->len += nbytes;

	return 0;
}

static void _chain_buffer_out_init(struct qce_device *pce_dev)
{
	struct dmov_desc *pdesc;

	pce_dev->ce_out_dst_desc_index = 0;
	pce_dev->ce_out_src_desc_index = 0;
	pdesc = pce_dev->ce_out_dst_desc;
	pdesc->len = 0;
}

static void _ce_out_final(struct qce_device *pce_dev, unsigned total)
{
	struct dmov_desc *pdesc;
	dmov_sg *pcmd;

	pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
	pdesc->len |= ADM_DESC_LAST;
	pdesc = pce_dev->ce_out_src_desc + pce_dev->ce_out_src_desc_index;
	pdesc->len |= ADM_DESC_LAST;
	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out;
	pcmd->cmd |= CMD_LC;
}

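/*
 * ADM completion callbacks. ce_in and ce_out complete independently, so
 * each callback records its channel status, marks its own channel COMP,
 * and only the second channel to finish moves both back to IDLE and runs
 * the request completion. _sha_ce_in_call_back completes immediately,
 * since hashing uses only the ce_in channel.
 */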
static void _aead_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
				result);
		pce_dev->chan_ce_in_status = -1;
	} else {
		pce_dev->chan_ce_in_status = 0;
	}

	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_aead_complete(pce_dev);
	}
}

static void _aead_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
				result);
		pce_dev->chan_ce_out_status = -1;
	} else {
		pce_dev->chan_ce_out_status = 0;
	}

	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_aead_complete(pce_dev);
	}
}

static void _sha_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
				result);
		pce_dev->chan_ce_in_status = -1;
	} else {
		pce_dev->chan_ce_in_status = 0;
	}
	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
	_sha_complete(pce_dev);
}

static void _ablk_cipher_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
				result);
		pce_dev->chan_ce_in_status = -1;
	} else {
		pce_dev->chan_ce_in_status = 0;
	}

	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_ablk_cipher_complete(pce_dev);
	}
}

static void _ablk_cipher_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
				result);
		pce_dev->chan_ce_out_status = -1;
	} else {
		pce_dev->chan_ce_out_status = 0;
	}

	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_ablk_cipher_complete(pce_dev);
	}
}

static void _ablk_cipher_ce_in_call_back_pmem(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
				result);
		pce_dev->chan_ce_in_status = -1;
	} else {
		pce_dev->chan_ce_in_status = 0;
	}

	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_ablk_cipher_use_pmem_complete(pce_dev);
	}
}

static void _ablk_cipher_ce_out_call_back_pmem(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
				result);
		pce_dev->chan_ce_out_status = -1;
	} else {
		pce_dev->chan_ce_out_status = 0;
	}

	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_ablk_cipher_use_pmem_complete(pce_dev);
	}
}

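/*
 * Carve the coherent memory allocated in qce_open() into the fixed DM
 * structures (descriptor arrays, command lists, command pointer lists,
 * digest result, pad and throw-away areas) and pre-build the scatter
 * gather commands for the ce_in and ce_out channels.
 */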
static int _setup_cmd_template(struct qce_device *pce_dev)
{
	dmov_sg *pcmd;
	struct dmov_desc *pdesc;
	unsigned char *vaddr;
	int i = 0;

	/* Divide up the coherent memory */

	/* 1. ce_in channel 1st command src descriptors, 128 entries */
	vaddr = pce_dev->coh_vmem;
	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
	pce_dev->ce_in_src_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_in_src_desc = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);

	/* 2. ce_in channel 1st command dst descriptors, 128 entries reserved */
	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
	pce_dev->ce_in_dst_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_in_dst_desc = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);

	/* 3. ce_in channel command list of one scatter gather command */
	pce_dev->cmd_list_ce_in = vaddr;
	pce_dev->phy_cmd_list_ce_in = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(dmov_sg);

	/* 4. authentication result. */
	pce_dev->dig_result = vaddr;
	pce_dev->phy_dig_result = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + SHA256_DIGESTSIZE;

	/* 5. ce_out channel command list of one scatter gather command */
	pce_dev->cmd_list_ce_out = vaddr;
	pce_dev->phy_cmd_list_ce_out = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(dmov_sg);

	/* 6. ce_out channel command src descriptors, 128 entries reserved */
	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
	pce_dev->ce_out_src_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_out_src_desc = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);

	/* 7. ce_out channel command dst descriptors, 128 entries. */
	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
	pce_dev->ce_out_dst_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_out_dst_desc = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);

	/* 8. pad area. */
	pce_dev->ce_pad = vaddr;
	pce_dev->phy_ce_pad = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);

	/* Padding length is doubled for the worst case scenario in AES-CCM */
	vaddr = vaddr + 2 * ADM_CE_BLOCK_SIZE;

	/* 9. ce_in channel command pointer list. */
	vaddr = (unsigned char *) ALIGN(((unsigned int) vaddr), 8);
	pce_dev->cmd_pointer_list_ce_in = (unsigned int *) vaddr;
	pce_dev->phy_cmd_pointer_list_ce_in = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(unsigned char *);

	/* 10. ce_out channel command pointer list. */
	vaddr = (unsigned char *) ALIGN(((unsigned int) vaddr), 8);
	pce_dev->cmd_pointer_list_ce_out = (unsigned int *) vaddr;
	pce_dev->phy_cmd_pointer_list_ce_out = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(unsigned char *);

	/* 11. throw-away area to store by-pass data from ce_out. */
	pce_dev->ce_out_ignore = (unsigned char *) vaddr;
	pce_dev->phy_ce_out_ignore = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	pce_dev->ce_out_ignore_size = (2 * PAGE_SIZE) - (vaddr -
			pce_dev->coh_vmem); /* at least 1.5 K of space */

	/*
	 * The first command of command list ce_in is for the input of
	 * concurrent operation of encrypt/decrypt or for the input
	 * of authentication.
	 */
	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in;
	/* swap byte and half word, dst crci, scatter gather */
	pcmd->cmd = CMD_DST_SWAP_BYTES | CMD_DST_SWAP_SHORTS |
			CMD_DST_CRCI(pce_dev->crci_in) | CMD_MODE_SG;
	pdesc = pce_dev->ce_in_src_desc;
	pdesc->addr = 0;	/* to be filled in each operation */
	pdesc->len = 0;		/* to be filled in each operation */
	pcmd->src_dscr = (unsigned) pce_dev->phy_ce_in_src_desc;

	pdesc = pce_dev->ce_in_dst_desc;
	for (i = 0; i < QCE_MAX_NUM_DESC; i++) {
		pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase);
		pdesc->len = 0;	/* to be filled in each operation */
		pdesc++;
	}
	pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_in_dst_desc;
	pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) |
						DST_INDEX_SG_CMD(0);
	pcmd++;

	/* setup command pointer list */
	*(pce_dev->cmd_pointer_list_ce_in) = (CMD_PTR_LP | DMOV_CMD_LIST |
			DMOV_CMD_ADDR((unsigned int)
					pce_dev->phy_cmd_list_ce_in));
	pce_dev->chan_ce_in_cmd->user = (void *) pce_dev;
	pce_dev->chan_ce_in_cmd->exec_func = NULL;
	pce_dev->chan_ce_in_cmd->cmdptr = DMOV_CMD_ADDR(
			(unsigned int) pce_dev->phy_cmd_pointer_list_ce_in);
	pce_dev->chan_ce_in_cmd->crci_mask = msm_dmov_build_crci_mask(1,
			pce_dev->crci_in);

	/*
	 * The first command in the command list ce_out.
	 * It is for the encrypt/decrypt output.
	 * If hashing only, ce_out is not used.
	 */
	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out;
	/* swap byte, half word, source crci, scatter gather */
	pcmd->cmd = CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
			CMD_SRC_CRCI(pce_dev->crci_out) | CMD_MODE_SG;

	pdesc = pce_dev->ce_out_src_desc;
	for (i = 0; i < QCE_MAX_NUM_DESC; i++) {
		pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase);
		pdesc->len = 0;	/* to be filled in each operation */
		pdesc++;
	}
	pcmd->src_dscr = (unsigned) pce_dev->phy_ce_out_src_desc;

	pdesc = pce_dev->ce_out_dst_desc;
	pdesc->addr = 0;	/* to be filled in each operation */
	pdesc->len = 0;		/* to be filled in each operation */
	pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_out_dst_desc;
	pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) |
						DST_INDEX_SG_CMD(0);

	pcmd++;

	/* setup command pointer list */
	*(pce_dev->cmd_pointer_list_ce_out) = (CMD_PTR_LP | DMOV_CMD_LIST |
			DMOV_CMD_ADDR((unsigned int)pce_dev->
						phy_cmd_list_ce_out));

	pce_dev->chan_ce_out_cmd->user = pce_dev;
	pce_dev->chan_ce_out_cmd->exec_func = NULL;
	pce_dev->chan_ce_out_cmd->cmdptr = DMOV_CMD_ADDR(
			(unsigned int) pce_dev->phy_cmd_pointer_list_ce_out);
	pce_dev->chan_ce_out_cmd->crci_mask = msm_dmov_build_crci_mask(1,
			pce_dev->crci_out);

	return 0;
}

static int _qce_start_dma(struct qce_device *pce_dev, bool ce_in, bool ce_out)
{
	if (ce_in)
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IN_PROG;
	else
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;

	if (ce_out)
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IN_PROG;
	else
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;

	if (ce_in)
		msm_dmov_enqueue_cmd(pce_dev->chan_ce_in,
					pce_dev->chan_ce_in_cmd);
	if (ce_out)
		msm_dmov_enqueue_cmd(pce_dev->chan_ce_out,
					pce_dev->chan_ce_out_cmd);

	return 0;
}

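/*
 * Submit an AEAD request. The ADM moves data to/from the CE in 16-byte
 * blocks, so both directions are padded up to ADM_CE_BLOCK_SIZE from the
 * pad area; e.g. a 20-byte ciphertext needs ALIGN(20, 16) - 20 = 12 pad
 * bytes. Associated data is fed in on ce_in, but its by-pass output is
 * steered to the throw-away buffer on ce_out.
 */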
int qce_aead_req(void *handle, struct qce_req *q_req)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	struct aead_request *areq = (struct aead_request *) q_req->areq;
	uint32_t authsize = q_req->authsize;
	uint32_t totallen_in, totallen_out, out_len;
	uint32_t pad_len_in, pad_len_out;
	uint32_t pad_mac_len_out, pad_ptx_len_out;
	int rc = 0;

	if (q_req->dir == QCE_ENCRYPT) {
		q_req->cryptlen = areq->cryptlen;
		totallen_in = q_req->cryptlen + areq->assoclen;
		totallen_out = q_req->cryptlen + authsize + areq->assoclen;
		out_len = areq->cryptlen + authsize;
		pad_len_in = ALIGN(totallen_in, ADM_CE_BLOCK_SIZE) -
								totallen_in;
		pad_mac_len_out = ALIGN(authsize, ADM_CE_BLOCK_SIZE) -
								authsize;
		pad_ptx_len_out = ALIGN(q_req->cryptlen, ADM_CE_BLOCK_SIZE) -
								q_req->cryptlen;
		pad_len_out = pad_ptx_len_out + pad_mac_len_out;
		totallen_out += pad_len_out;
	} else {
		q_req->cryptlen = areq->cryptlen - authsize;
		totallen_in = areq->cryptlen + areq->assoclen;
		totallen_out = q_req->cryptlen + areq->assoclen;
		out_len = areq->cryptlen - authsize;
		pad_len_in = ALIGN(areq->cryptlen, ADM_CE_BLOCK_SIZE) -
							areq->cryptlen;
		pad_len_out = pad_len_in + authsize;
		totallen_out += pad_len_out;
	}

	_chain_buffer_in_init(pce_dev);
	_chain_buffer_out_init(pce_dev);

	pce_dev->assoc_nents = 0;
	pce_dev->src_nents = 0;
	pce_dev->dst_nents = 0;
	pce_dev->ivsize = q_req->ivsize;
	pce_dev->authsize = q_req->authsize;

	/* associated data input */
	pce_dev->assoc_nents = count_sg(areq->assoc, areq->assoclen);
	dma_map_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
					DMA_TO_DEVICE);
	if (_chain_sg_buffer_in(pce_dev, areq->assoc, areq->assoclen) < 0) {
		rc = -ENOMEM;
		goto bad;
	}
	/* cipher input */
	pce_dev->src_nents = count_sg(areq->src, areq->cryptlen);
	dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
							DMA_TO_DEVICE);
	if (_chain_sg_buffer_in(pce_dev, areq->src, areq->cryptlen) < 0) {
		rc = -ENOMEM;
		goto bad;
	}
	/* pad data in */
	if (pad_len_in) {
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
						pad_len_in) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* ignore associated data */
	if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_out_ignore,
					areq->assoclen) < 0) {
		rc = -ENOMEM;
		goto bad;
	}
	/* cipher + mac output for encryption */
	if (areq->src != areq->dst) {
		pce_dev->dst_nents = count_sg(areq->dst, out_len);
		dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
				DMA_FROM_DEVICE);
	}
	if (_chain_sg_buffer_out(pce_dev, areq->dst, out_len) < 0) {
		rc = -ENOMEM;
		goto bad;
	}
	/* pad data out */
	if (pad_len_out) {
		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
						pad_len_out) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* finalize the ce_in and ce_out channels command lists */
	_ce_in_final(pce_dev, ALIGN(totallen_in, ADM_CE_BLOCK_SIZE));
	_ce_out_final(pce_dev, ALIGN(totallen_out, ADM_CE_BLOCK_SIZE));

	/* set up crypto device */
	rc = _ce_setup_cipher(pce_dev, q_req, totallen_in, areq->assoclen);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = q_req->areq;
	pce_dev->qce_cb = q_req->qce_cb;

	pce_dev->chan_ce_in_cmd->complete_func = _aead_ce_in_call_back;
	pce_dev->chan_ce_out_cmd->complete_func = _aead_ce_out_call_back;

	_ce_in_dump(pce_dev);
	_ce_out_dump(pce_dev);

	rc = _qce_start_dma(pce_dev, true, true);
	if (rc == 0)
		return 0;
bad:
	if (pce_dev->assoc_nents) {
		dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
				DMA_TO_DEVICE);
	}
	if (pce_dev->src_nents) {
		dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
				(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
								DMA_TO_DEVICE);
	}
	if (pce_dev->dst_nents) {
		dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
				DMA_FROM_DEVICE);
	}
	return rc;
}
EXPORT_SYMBOL(qce_aead_req);

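/*
 * Submit a block-cipher request. Source and destination scatterlists are
 * chained onto the ce_in and ce_out channels (via pmem offsets instead of
 * dma_map_sg() when use_pmem is set), padded to a 16-byte ADM block, and
 * dispatched on both channels.
 */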
int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
{
	int rc = 0;
	struct qce_device *pce_dev = (struct qce_device *) handle;
	struct ablkcipher_request *areq = (struct ablkcipher_request *)
						c_req->areq;
	uint32_t pad_len = ALIGN(areq->nbytes, ADM_CE_BLOCK_SIZE)
						- areq->nbytes;

	_chain_buffer_in_init(pce_dev);
	_chain_buffer_out_init(pce_dev);

	pce_dev->src_nents = 0;
	pce_dev->dst_nents = 0;

	/* cipher input */
	pce_dev->src_nents = count_sg(areq->src, areq->nbytes);

	if (c_req->use_pmem != 1)
		dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
							DMA_TO_DEVICE);
	else
		dma_map_pmem_sg(&c_req->pmem->src[0], pce_dev->src_nents,
							areq->src);

	if (_chain_sg_buffer_in(pce_dev, areq->src, areq->nbytes) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* cipher output */
	if (areq->src != areq->dst) {
		pce_dev->dst_nents = count_sg(areq->dst, areq->nbytes);
		if (c_req->use_pmem != 1)
			dma_map_sg(pce_dev->pdev, areq->dst,
					pce_dev->dst_nents, DMA_FROM_DEVICE);
		else
			dma_map_pmem_sg(&c_req->pmem->dst[0],
					pce_dev->dst_nents, areq->dst);
	}
	if (_chain_sg_buffer_out(pce_dev, areq->dst, areq->nbytes) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* pad data */
	if (pad_len) {
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
						pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
						pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* finalize the ce_in and ce_out channels command lists */
	_ce_in_final(pce_dev, areq->nbytes + pad_len);
	_ce_out_final(pce_dev, areq->nbytes + pad_len);

	_ce_in_dump(pce_dev);
	_ce_out_dump(pce_dev);

	/* set up crypto device */
	rc = _ce_setup_cipher(pce_dev, c_req, areq->nbytes, 0);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = areq;
	pce_dev->qce_cb = c_req->qce_cb;
	if (c_req->use_pmem == 1) {
		pce_dev->chan_ce_in_cmd->complete_func =
					_ablk_cipher_ce_in_call_back_pmem;
		pce_dev->chan_ce_out_cmd->complete_func =
					_ablk_cipher_ce_out_call_back_pmem;
	} else {
		pce_dev->chan_ce_in_cmd->complete_func =
					_ablk_cipher_ce_in_call_back;
		pce_dev->chan_ce_out_cmd->complete_func =
					_ablk_cipher_ce_out_call_back;
	}
	rc = _qce_start_dma(pce_dev, true, true);
	if (rc == 0)
		return 0;
bad:
	if (c_req->use_pmem != 1) {
		if (pce_dev->dst_nents) {
			dma_unmap_sg(pce_dev->pdev, areq->dst,
					pce_dev->dst_nents, DMA_FROM_DEVICE);
		}
		if (pce_dev->src_nents) {
			dma_unmap_sg(pce_dev->pdev, areq->src,
					pce_dev->src_nents,
					(areq->src == areq->dst) ?
						DMA_BIDIRECTIONAL :
						DMA_TO_DEVICE);
		}
	}
	return rc;
}
EXPORT_SYMBOL(qce_ablk_cipher_req);

int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	int rc;
	uint32_t pad_len = ALIGN(sreq->size, ADM_CE_BLOCK_SIZE) - sreq->size;
	struct ahash_request *areq = (struct ahash_request *)sreq->areq;

	_chain_buffer_in_init(pce_dev);
	pce_dev->src_nents = count_sg(sreq->src, sreq->size);
	dma_map_sg(pce_dev->pdev, sreq->src, pce_dev->src_nents,
							DMA_TO_DEVICE);

	if (_chain_sg_buffer_in(pce_dev, sreq->src, sreq->size) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	if (pad_len) {
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
						pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}
	_ce_in_final(pce_dev, sreq->size + pad_len);

	_ce_in_dump(pce_dev);

	rc = _ce_setup_hash(pce_dev, sreq);
	if (rc < 0)
		goto bad;

	pce_dev->areq = areq;
	pce_dev->qce_cb = sreq->qce_cb;
	pce_dev->chan_ce_in_cmd->complete_func = _sha_ce_in_call_back;

	rc = _qce_start_dma(pce_dev, true, false);
	if (rc == 0)
		return 0;
bad:
	if (pce_dev->src_nents) {
		dma_unmap_sg(pce_dev->pdev, sreq->src,
				pce_dev->src_nents, DMA_TO_DEVICE);
	}

	return rc;
}
EXPORT_SYMBOL(qce_process_sha_req);

/* crypto engine open function. */
void *qce_open(struct platform_device *pdev, int *rc)
{
	struct qce_device *pce_dev;
	struct resource *resource;
	struct clk *ce_core_clk;
	struct clk *ce_clk;

	pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
	if (!pce_dev) {
		*rc = -ENOMEM;
		dev_err(&pdev->dev, "Can not allocate memory\n");
		return NULL;
	}
	pce_dev->pdev = &pdev->dev;

	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!resource) {
		*rc = -ENXIO;
		dev_err(pce_dev->pdev, "Missing MEM resource\n");
		goto err_pce_dev;
	}
	pce_dev->phy_iobase = resource->start;
	pce_dev->iobase = ioremap_nocache(resource->start,
				resource->end - resource->start + 1);
	if (!pce_dev->iobase) {
		*rc = -ENOMEM;
		dev_err(pce_dev->pdev, "Can not map io memory\n");
		goto err_pce_dev;
	}

	pce_dev->chan_ce_in_cmd = kzalloc(sizeof(struct msm_dmov_cmd),
			GFP_KERNEL);
	pce_dev->chan_ce_out_cmd = kzalloc(sizeof(struct msm_dmov_cmd),
			GFP_KERNEL);
	if (pce_dev->chan_ce_in_cmd == NULL ||
			pce_dev->chan_ce_out_cmd == NULL) {
		dev_err(pce_dev->pdev, "Can not allocate memory\n");
		*rc = -ENOMEM;
		goto err_dm_chan_cmd;
	}

	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
					"crypto_channels");
	if (!resource) {
		*rc = -ENXIO;
		dev_err(pce_dev->pdev, "Missing DMA channel resource\n");
		goto err_dm_chan_cmd;
	}
	pce_dev->chan_ce_in = resource->start;
	pce_dev->chan_ce_out = resource->end;
	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
					"crypto_crci_in");
	if (!resource) {
		*rc = -ENXIO;
		dev_err(pce_dev->pdev, "Missing DMA crci in resource\n");
		goto err_dm_chan_cmd;
	}
	pce_dev->crci_in = resource->start;
	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
					"crypto_crci_out");
	if (!resource) {
		*rc = -ENXIO;
		dev_err(pce_dev->pdev, "Missing DMA crci out resource\n");
		goto err_dm_chan_cmd;
	}
	pce_dev->crci_out = resource->start;

	pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
			2*PAGE_SIZE, &pce_dev->coh_pmem, GFP_KERNEL);
	if (pce_dev->coh_vmem == NULL) {
		*rc = -ENOMEM;
		dev_err(pce_dev->pdev, "Can not allocate coherent memory.\n");
		goto err;
	}

	/* Get CE core clk */
	ce_core_clk = clk_get(pce_dev->pdev, "ce_clk");
	if (IS_ERR(ce_core_clk)) {
		*rc = PTR_ERR(ce_core_clk);
		goto err;
	}
	pce_dev->ce_core_clk = ce_core_clk;
	/* Get CE clk */
	ce_clk = clk_get(pce_dev->pdev, "ce_pclk");
	if (IS_ERR(ce_clk)) {
		*rc = PTR_ERR(ce_clk);
		clk_put(pce_dev->ce_core_clk);
		goto err;
	}
	pce_dev->ce_clk = ce_clk;

	/* Enable CE core clk */
	*rc = clk_enable(pce_dev->ce_core_clk);
	if (*rc) {
		clk_put(pce_dev->ce_core_clk);
		clk_put(pce_dev->ce_clk);
		goto err;
	}
	/* Enable CE clk */
	*rc = clk_enable(pce_dev->ce_clk);
	if (*rc) {
		clk_disable(pce_dev->ce_core_clk);
		clk_put(pce_dev->ce_core_clk);
		clk_put(pce_dev->ce_clk);
		goto err;
	}

	_setup_cmd_template(pce_dev);

	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

	if (_init_ce_engine(pce_dev)) {
		*rc = -ENXIO;
		goto err;
	}
	*rc = 0;
	return pce_dev;

err:
	if (pce_dev->coh_vmem)
		dma_free_coherent(pce_dev->pdev, 2*PAGE_SIZE,
				pce_dev->coh_vmem, pce_dev->coh_pmem);
err_dm_chan_cmd:
	kfree(pce_dev->chan_ce_in_cmd);
	kfree(pce_dev->chan_ce_out_cmd);
	if (pce_dev->iobase)
		iounmap(pce_dev->iobase);

err_pce_dev:
	kfree(pce_dev);

	return NULL;
}
EXPORT_SYMBOL(qce_open);

/* crypto engine close function. */
int qce_close(void *handle)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;

	if (handle == NULL)
		return -ENODEV;
	if (pce_dev->iobase)
		iounmap(pce_dev->iobase);

	if (pce_dev->coh_vmem)
		dma_free_coherent(pce_dev->pdev, 2*PAGE_SIZE,
				pce_dev->coh_vmem, pce_dev->coh_pmem);
	clk_disable(pce_dev->ce_clk);
	clk_disable(pce_dev->ce_core_clk);

	clk_put(pce_dev->ce_clk);
	clk_put(pce_dev->ce_core_clk);

	kfree(pce_dev->chan_ce_in_cmd);
	kfree(pce_dev->chan_ce_out_cmd);
	kfree(handle);

	return 0;
}
EXPORT_SYMBOL(qce_close);

int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
{
	if (ce_support == NULL)
		return -EINVAL;

	ce_support->sha1_hmac_20 = false;
	ce_support->sha1_hmac = false;
	ce_support->sha256_hmac = false;
	ce_support->sha_hmac = false;
	ce_support->cmac = true;
	ce_support->aes_key_192 = false;
	ce_support->aes_xts = true;
	ce_support->aes_ccm = true;
	ce_support->ota = false;
	return 0;
}
EXPORT_SYMBOL(qce_hw_support);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
MODULE_DESCRIPTION("Crypto Engine driver");
MODULE_VERSION("2.04");