/* Qualcomm Crypto Engine driver.
 *
 * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/qcedev.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <mach/dma.h>
#include <mach/clk.h>

#include "qce.h"
#include "qcryptohw_40.h"

/* ADM definitions */
#define LI_SG_CMD  (1 << 31)	/* last index in the scatter gather cmd */
#define SRC_INDEX_SG_CMD(index) ((index & 0x3fff) << 16)
#define DST_INDEX_SG_CMD(index) (index & 0x3fff)
#define ADM_DESC_LAST  (1 << 31)

/* Data xfer between DM and CE in blocks of 16 bytes */
#define ADM_CE_BLOCK_SIZE  16

#define ADM_DESC_LENGTH_MASK 0xffff
#define ADM_DESC_LENGTH(x)  (x & ADM_DESC_LENGTH_MASK)

struct dmov_desc {
	uint32_t addr;
	uint32_t len;
};

#define ADM_STATUS_OK 0x80000002
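
/*
 * Illustrative sketch, not part of the driver: how a two-entry ADM
 * descriptor list is assembled with the macros above. Lengths live in
 * the low 16 bits and ADM_DESC_LAST marks the final entry; the
 * addresses and sizes are made-up example values, and the
 * QCE_EXAMPLE_SNIPPETS guard is hypothetical so the sketch is
 * normally compiled out.
 */
#ifdef QCE_EXAMPLE_SNIPPETS
static void example_build_desc_list(struct dmov_desc *d)
{
	d[0].addr = 0x10000000;			/* chunk 0, physical */
	d[0].len = ADM_DESC_LENGTH(4096);
	d[1].addr = 0x10001000;			/* chunk 1, physical */
	d[1].len = ADM_DESC_LENGTH(2048) | ADM_DESC_LAST;
}
#endif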

/* Misc definitions */

/* QCE max number of descriptors in a descriptor list */
#define QCE_MAX_NUM_DESC 128

/* State of DM channel */
enum qce_chan_st_enum {
	QCE_CHAN_STATE_IDLE = 0,
	QCE_CHAN_STATE_IN_PROG = 1,
	QCE_CHAN_STATE_COMP = 2,
	QCE_CHAN_STATE_LAST
};

/*
 * CE HW device structure.
 * Each engine has an instance of the structure.
 * Each engine can only handle one crypto operation at a time. It is
 * up to the software above to ensure that operations on an engine
 * are single threaded.
 */
struct qce_device {
	struct device *pdev;		/* Handle to platform_device structure */
	unsigned char *coh_vmem;	/* Allocated coherent virtual memory */
	dma_addr_t coh_pmem;		/* Allocated coherent physical memory */
	void __iomem *iobase;		/* Virtual io base of CE HW */
	unsigned int phy_iobase;	/* Physical io base of CE HW */
	struct clk *ce_core_clk;	/* Handle to CE core clk */
	struct clk *ce_clk;		/* Handle to CE clk */
	unsigned int crci_in;		/* CRCI for CE DM IN Channel */
	unsigned int crci_out;		/* CRCI for CE DM OUT Channel */
	unsigned int chan_ce_in;	/* ADM channel used for CE input
					 * and auth result if authentication
					 * only operation. */
	unsigned int chan_ce_out;	/* ADM channel used for CE output,
					 * and icv for esp */
	unsigned int *cmd_pointer_list_ce_in;
	dma_addr_t phy_cmd_pointer_list_ce_in;

	unsigned int *cmd_pointer_list_ce_out;
	dma_addr_t phy_cmd_pointer_list_ce_out;

	unsigned char *cmd_list_ce_in;
	dma_addr_t phy_cmd_list_ce_in;

	unsigned char *cmd_list_ce_out;
	dma_addr_t phy_cmd_list_ce_out;

	struct dmov_desc *ce_out_src_desc;
	dma_addr_t phy_ce_out_src_desc;

	struct dmov_desc *ce_out_dst_desc;
	dma_addr_t phy_ce_out_dst_desc;

	struct dmov_desc *ce_in_src_desc;
	dma_addr_t phy_ce_in_src_desc;

	struct dmov_desc *ce_in_dst_desc;
	dma_addr_t phy_ce_in_dst_desc;

	unsigned char *ce_out_ignore;
	dma_addr_t phy_ce_out_ignore;

	unsigned char *ce_pad;
	dma_addr_t phy_ce_pad;

	struct msm_dmov_cmd *chan_ce_in_cmd;
	struct msm_dmov_cmd *chan_ce_out_cmd;

	uint32_t ce_out_ignore_size;

	int ce_out_dst_desc_index;
	int ce_in_dst_desc_index;

	int ce_out_src_desc_index;
	int ce_in_src_desc_index;

	enum qce_chan_st_enum chan_ce_in_state;		/* chan ce_in state */
	enum qce_chan_st_enum chan_ce_out_state;	/* chan ce_out state */

	int chan_ce_in_status;		/* chan ce_in status */
	int chan_ce_out_status;		/* chan ce_out status */

	unsigned char *dig_result;
	dma_addr_t phy_dig_result;

	/* cached aes key */
	uint32_t cipher_key[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)];

	uint32_t cipher_key_size;	/* cached aes key size in bytes */
	qce_comp_func_ptr_t qce_cb;	/* qce callback function pointer */

	int assoc_nents;
	int ivsize;
	int authsize;
	int src_nents;
	int dst_nents;

	void *areq;
	enum qce_cipher_mode_enum mode;

	dma_addr_t phy_iv_in;
};

/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
static uint32_t _std_init_vector_sha1[] = {
	0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
};
/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint32_t _std_init_vector_sha256[] = {
	0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
	0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
};

static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned n;

	n = len / sizeof(uint32_t);
	for (; n > 0; n--) {
		*iv = ((*b << 24) & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000) |
				(((*(b+2)) << 8) & 0xff00) |
				(*(b+3) & 0xff);
		b += sizeof(uint32_t);
		iv++;
	}

	n = len % sizeof(uint32_t);
	if (n == 3) {
		*iv = ((*b << 24) & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000) |
				(((*(b+2)) << 8) & 0xff00);
	} else if (n == 2) {
		*iv = ((*b << 24) & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000);
	} else if (n == 1) {
		*iv = ((*b << 24) & 0xff000000);
	}
}
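
/*
 * Illustrative sketch (hypothetical QCE_EXAMPLE_SNIPPETS guard, so it
 * is normally compiled out): _byte_stream_to_net_words() packs bytes
 * big-endian into 32-bit words, which is the layout the CE key and IV
 * registers expect.
 */
#ifdef QCE_EXAMPLE_SNIPPETS
static void example_pack_word(void)
{
	unsigned char bytes[4] = {0x01, 0x02, 0x03, 0x04};
	uint32_t word;

	_byte_stream_to_net_words(&word, bytes, sizeof(bytes));
	/* word == 0x01020304 regardless of CPU endianness */
}
#endif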

static void _byte_stream_swap_to_net_words(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned i, j;
	unsigned char swap_iv[AES_IV_LENGTH];

	memset(swap_iv, 0, AES_IV_LENGTH);
	for (i = (AES_IV_LENGTH-len), j = len-1; i < AES_IV_LENGTH; i++, j--)
		swap_iv[i] = b[j];
	_byte_stream_to_net_words(iv, swap_iv, AES_IV_LENGTH);
}
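
/*
 * Illustrative sketch (hypothetical QCE_EXAMPLE_SNIPPETS guard, so it
 * is normally compiled out): the swap variant reverses the byte order
 * before the big-endian packing; this path is used below for the XTS
 * IV.
 */
#ifdef QCE_EXAMPLE_SNIPPETS
static void example_swap_iv(void)
{
	unsigned char iv_bytes[AES_IV_LENGTH] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
	uint32_t iv_words[AES_IV_LENGTH / sizeof(uint32_t)];

	_byte_stream_swap_to_net_words(iv_words, iv_bytes, AES_IV_LENGTH);
	/* iv_words = { 0x0f0e0d0c, 0x0b0a0908, 0x07060504, 0x03020100 } */
}
#endif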

static void _net_words_to_byte_stream(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned n = len / sizeof(uint32_t);

	for (; n > 0; n--) {
		*b++ = (unsigned char) ((*iv >> 24) & 0xff);
		*b++ = (unsigned char) ((*iv >> 16) & 0xff);
		*b++ = (unsigned char) ((*iv >> 8) & 0xff);
		*b++ = (unsigned char) (*iv & 0xff);
		iv++;
	}
	n = len % sizeof(uint32_t);
	if (n == 3) {
		*b++ = (unsigned char) ((*iv >> 24) & 0xff);
		*b++ = (unsigned char) ((*iv >> 16) & 0xff);
		*b = (unsigned char) ((*iv >> 8) & 0xff);
	} else if (n == 2) {
		*b++ = (unsigned char) ((*iv >> 24) & 0xff);
		*b = (unsigned char) ((*iv >> 16) & 0xff);
	} else if (n == 1) {
		*b = (unsigned char) ((*iv >> 24) & 0xff);
	}
}

static int count_sg(struct scatterlist *sg, int nbytes)
{
	int i;

	for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
		nbytes -= sg->length;
	return i;
}

static int dma_map_pmem_sg(struct buf_info *pmem, unsigned entries,
		struct scatterlist *sg)
{
	int i;

	for (i = 0; i < entries; i++) {
		sg->dma_address = (dma_addr_t)pmem->offset;
		sg++;
		pmem++;
	}
	return 0;
}

static int _probe_ce_engine(struct qce_device *pce_dev)
{
	unsigned int val;
	unsigned int rev;

	val = readl_relaxed(pce_dev->iobase + CRYPTO_VERSION_REG);
	if (((val & 0xfffffff) != 0x0000042) &&
			((val & 0xfffffff) != 0x0000040)) {
		dev_err(pce_dev->pdev,
				"Unknown Qualcomm crypto device at 0x%x 0x%x\n",
				pce_dev->phy_iobase, val);
		return -EIO;
	}
	rev = (val & CRYPTO_CORE_REV_MASK);
	if (rev == 0x42)
		dev_info(pce_dev->pdev,
				"Qualcomm Crypto 4.2 device found at 0x%x\n",
				pce_dev->phy_iobase);
	else if (rev == 0x40)
		dev_info(pce_dev->pdev,
				"Qualcomm Crypto 4.0 device found at 0x%x\n",
				pce_dev->phy_iobase);

	dev_info(pce_dev->pdev,
			"IO base 0x%x, ce_in channel %d, "
			"ce_out channel %d, "
			"crci_in %d, crci_out %d\n",
			(unsigned int) pce_dev->iobase,
			pce_dev->chan_ce_in, pce_dev->chan_ce_out,
			pce_dev->crci_in, pce_dev->crci_out);

	pce_dev->cipher_key_size = 0;

	return 0;
}

static int _init_ce_engine(struct qce_device *pce_dev)
{
	unsigned int val;

	/* Reset ce */
	clk_reset(pce_dev->ce_core_clk, CLK_RESET_ASSERT);
	clk_reset(pce_dev->ce_core_clk, CLK_RESET_DEASSERT);
	/*
	 * Ensure the previous writes to the CLK registers, which toggle
	 * the CLK reset lines, have completed.
	 */
	dsb();
	/* configure ce */
	val = (1 << CRYPTO_MASK_DOUT_INTR) | (1 << CRYPTO_MASK_DIN_INTR) |
			(1 << CRYPTO_MASK_OP_DONE_INTR) |
			(1 << CRYPTO_MASK_ERR_INTR);
	writel_relaxed(val, pce_dev->iobase + CRYPTO_CONFIG_REG);
	/*
	 * Ensure the previous writel_relaxed() to the config register
	 * has completed before reading it back.
	 */
	dsb();
	val = readl_relaxed(pce_dev->iobase + CRYPTO_CONFIG_REG);
	if (!val) {
		dev_err(pce_dev->pdev,
				"unknown Qualcomm crypto device at 0x%x\n",
				pce_dev->phy_iobase);
		return -EIO;
	}
	if (_probe_ce_engine(pce_dev) < 0)
		return -EIO;
	return 0;
}

static int _ce_setup_hash(struct qce_device *pce_dev, struct qce_sha_req *sreq)
{
	uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
	uint32_t diglen;
	int i;
	uint32_t auth_cfg = 0;
	bool sha1 = false;

	if (sreq->alg == QCE_HASH_AES_CMAC) {
		uint32_t authkey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
		uint32_t authklen32 = sreq->authklen/(sizeof(uint32_t));

		/* Clear auth_ivn, auth_keyn registers */
		for (i = 0; i < 16; i++) {
			writel_relaxed(0, (pce_dev->iobase +
				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t))));
			writel_relaxed(0, (pce_dev->iobase +
				(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t))));
		}
		/* write auth_bytecnt 0/1/2/3, start with 0 */
		for (i = 0; i < 4; i++)
			writel_relaxed(0, pce_dev->iobase +
						CRYPTO_AUTH_BYTECNT0_REG +
						i * sizeof(uint32_t));

		_byte_stream_to_net_words(authkey32, sreq->authkey,
						sreq->authklen);
		for (i = 0; i < authklen32; i++)
			writel_relaxed(authkey32[i], pce_dev->iobase +
				CRYPTO_AUTH_KEY0_REG + (i * sizeof(uint32_t)));
		/* write seg_cfg */
		auth_cfg |= (1 << CRYPTO_LAST);
		auth_cfg |= (CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE);
		auth_cfg |= (CRYPTO_AUTH_SIZE_ENUM_16_BYTES <<
							CRYPTO_AUTH_SIZE);
		auth_cfg |= CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG;

		switch (sreq->authklen) {
		case AES128_KEY_SIZE:
			auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES128 <<
						CRYPTO_AUTH_KEY_SIZE);
			break;
		case AES256_KEY_SIZE:
			auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES256 <<
						CRYPTO_AUTH_KEY_SIZE);
			break;
		default:
			break;
		}

		goto go_proc;
	}

	/* if not the last, the size has to be on a block boundary */
	if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
		return -EIO;

	switch (sreq->alg) {
	case QCE_HASH_SHA1:
	case QCE_HASH_SHA1_HMAC:
		diglen = SHA1_DIGEST_SIZE;
		sha1 = true;
		break;
	case QCE_HASH_SHA256:
	case QCE_HASH_SHA256_HMAC:
		diglen = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
			(sreq->alg == QCE_HASH_SHA256_HMAC)) {
		uint32_t hmackey[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
		uint32_t hmacklen = sreq->authklen/(sizeof(uint32_t));

		_byte_stream_to_net_words(hmackey, sreq->authkey,
						sreq->authklen);
		/* write hmac key */
		for (i = 0; i < hmacklen; i++)
			writel_relaxed(hmackey[i], pce_dev->iobase +
				CRYPTO_AUTH_KEY0_REG + (i * sizeof(uint32_t)));

		auth_cfg |= (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE);
	} else {
		auth_cfg |= (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE);
	}

	/* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
	if (sreq->first_blk) {
		if (sha1) {
			for (i = 0; i < 5; i++)
				auth32[i] = _std_init_vector_sha1[i];
		} else {
			for (i = 0; i < 8; i++)
				auth32[i] = _std_init_vector_sha256[i];
		}
	} else {
		_byte_stream_to_net_words(auth32, sreq->digest, diglen);
	}

	for (i = 0; i < 5; i++)
		writel_relaxed(auth32[i], (pce_dev->iobase +
			(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t))));

	if ((sreq->alg == QCE_HASH_SHA256) ||
			(sreq->alg == QCE_HASH_SHA256_HMAC)) {
		writel_relaxed(auth32[5], pce_dev->iobase +
							CRYPTO_AUTH_IV5_REG);
		writel_relaxed(auth32[6], pce_dev->iobase +
							CRYPTO_AUTH_IV6_REG);
		writel_relaxed(auth32[7], pce_dev->iobase +
							CRYPTO_AUTH_IV7_REG);
	}

	/* write auth_bytecnt 0/1/2/3 from the running byte count */
	for (i = 0; i < 4; i++)
		writel_relaxed(sreq->auth_data[i], (pce_dev->iobase +
			(CRYPTO_AUTH_BYTECNT0_REG + i * sizeof(uint32_t))));

	/* write seg_cfg */
	if (sha1)
		auth_cfg |= (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE);
	else
		auth_cfg |= (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE);

	if (sreq->last_blk)
		auth_cfg |= 1 << CRYPTO_LAST;

	auth_cfg |= CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG;

go_proc:
	auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);

	/* write seg_cfg */
	writel_relaxed(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);

	/* write seg_size */
	writel_relaxed(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	/* write auth_seg_size */
	writel_relaxed(sreq->size, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);

	/* write auth_seg_start */
	writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
	/*
	 * Ensure the previous writes to all AUTH registers have completed
	 * before accessing a register that is not in the same 1K range.
	 */
	dsb();

	writel_relaxed(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
	/*
	 * Ensure the previous writes setting up the CE registers have
	 * completed before writing to the GO register.
	 */
	dsb();
	/* issue go to crypto */
	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
	/*
	 * Ensure the previous write to the GO register has completed
	 * before issuing a DMA transfer request.
	 */
	dsb();

	return 0;
}

static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
		uint32_t totallen_in, uint32_t coffset)
{
	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
		0, 0, 0, 0};
	uint32_t enck_size_in_word = creq->encklen / sizeof(uint32_t);
	int aes_key_chg;
	int i;
	uint32_t encr_cfg = 0;
	uint32_t ivsize = creq->ivsize;

	if (creq->mode == QCE_MODE_XTS)
		_byte_stream_to_net_words(enckey32, creq->enckey,
						creq->encklen/2);
	else
		_byte_stream_to_net_words(enckey32, creq->enckey,
						creq->encklen);

	if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
		uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
		uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
		uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
		uint32_t auth_cfg = 0;

		/* Clear auth_ivn, auth_keyn registers */
		for (i = 0; i < 16; i++) {
			writel_relaxed(0, (pce_dev->iobase +
				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
			writel_relaxed(0, (pce_dev->iobase +
				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
		}
		/* write auth_bytecnt 0/1/2/3, start with 0 */
		for (i = 0; i < 4; i++)
			writel_relaxed(0, pce_dev->iobase +
						CRYPTO_AUTH_BYTECNT0_REG +
						i * sizeof(uint32_t));
		/* write auth key */
		for (i = 0; i < authklen32; i++)
			writel_relaxed(enckey32[i], pce_dev->iobase +
				CRYPTO_AUTH_KEY0_REG + (i*sizeof(uint32_t)));

		/* write nonce */
		_byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
		for (i = 0; i < noncelen32; i++)
			writel_relaxed(nonce32[i], pce_dev->iobase +
						CRYPTO_AUTH_INFO_NONCE0_REG +
						(i*sizeof(uint32_t)));

		auth_cfg |= (noncelen32 << CRYPTO_AUTH_NONCE_NUM_WORDS);
		auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
		auth_cfg |= (1 << CRYPTO_LAST);
		if (creq->dir == QCE_ENCRYPT)
			auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
		else
			auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
		auth_cfg |= (((creq->authsize >> 1) - 2) << CRYPTO_AUTH_SIZE);
		auth_cfg |= (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE);
		if (creq->authklen == AES128_KEY_SIZE)
			auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES128 <<
						CRYPTO_AUTH_KEY_SIZE);
		else if (creq->authklen == AES256_KEY_SIZE)
			auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES256 <<
						CRYPTO_AUTH_KEY_SIZE);
		auth_cfg |= (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG);
		writel_relaxed(auth_cfg, pce_dev->iobase +
						CRYPTO_AUTH_SEG_CFG_REG);
		if (creq->dir == QCE_ENCRYPT)
			writel_relaxed(totallen_in, pce_dev->iobase +
						CRYPTO_AUTH_SEG_SIZE_REG);
		else
			writel_relaxed((totallen_in - creq->authsize),
				pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
		writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
	} else {
		writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
	}
	/*
	 * Ensure the previous writes to all AUTH registers have completed
	 * before accessing a register that is not in the same 1K range.
	 */
	dsb();

	switch (creq->mode) {
	case QCE_MODE_ECB:
		encr_cfg |= (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
		break;

	case QCE_MODE_CBC:
		encr_cfg |= (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
		break;

	case QCE_MODE_XTS:
		encr_cfg |= (CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
		break;

	case QCE_MODE_CCM:
		encr_cfg |= (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE);
		break;

	case QCE_MODE_CTR:
	default:
		encr_cfg |= (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
		break;
	}
	pce_dev->mode = creq->mode;

	switch (creq->alg) {
	case CIPHER_ALG_DES:
		if (creq->mode != QCE_MODE_ECB) {
			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
			writel_relaxed(enciv32[0], pce_dev->iobase +
						CRYPTO_CNTR0_IV0_REG);
			writel_relaxed(enciv32[1], pce_dev->iobase +
						CRYPTO_CNTR1_IV1_REG);
		}
		writel_relaxed(enckey32[0], pce_dev->iobase +
						CRYPTO_ENCR_KEY0_REG);
		writel_relaxed(enckey32[1], pce_dev->iobase +
						CRYPTO_ENCR_KEY1_REG);
		encr_cfg |= ((CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
				(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
		break;

	case CIPHER_ALG_3DES:
		if (creq->mode != QCE_MODE_ECB) {
			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
			writel_relaxed(enciv32[0], pce_dev->iobase +
						CRYPTO_CNTR0_IV0_REG);
			writel_relaxed(enciv32[1], pce_dev->iobase +
						CRYPTO_CNTR1_IV1_REG);
		}
		/* write all six key words, not just the first one */
		for (i = 0; i < 6; i++)
			writel_relaxed(enckey32[i], (pce_dev->iobase +
				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t))));

		encr_cfg |= ((CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
				(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
		break;

	case CIPHER_ALG_AES:
	default:
		if (creq->mode == QCE_MODE_XTS) {
			uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
					= {0, 0, 0, 0, 0, 0, 0, 0};
			uint32_t xtsklen =
					creq->encklen/(2 * sizeof(uint32_t));

			_byte_stream_to_net_words(xtskey32, (creq->enckey +
					creq->encklen/2), creq->encklen/2);
			for (i = 0; i < xtsklen; i++)
				writel_relaxed(xtskey32[i], pce_dev->iobase +
						CRYPTO_ENCR_XTS_KEY0_REG +
						(i * sizeof(uint32_t)));

			writel_relaxed(creq->cryptlen,
					pce_dev->iobase +
					CRYPTO_ENCR_XTS_DU_SIZE_REG);
		}
		if (creq->mode != QCE_MODE_ECB) {
			if (creq->mode == QCE_MODE_XTS)
				_byte_stream_swap_to_net_words(enciv32,
							creq->iv, ivsize);
			else
				_byte_stream_to_net_words(enciv32, creq->iv,
								ivsize);
			for (i = 0; i <= 3; i++)
				writel_relaxed(enciv32[i], pce_dev->iobase +
						CRYPTO_CNTR0_IV0_REG +
						(i * sizeof(uint32_t)));
		}
		/* set number of counter bits */
		writel_relaxed(0xffffffff, pce_dev->iobase +
						CRYPTO_CNTR_MASK_REG);

		if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
			encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
						CRYPTO_ENCR_KEY_SZ);
			encr_cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
		} else {
			uint32_t key_size;

			if (creq->mode == QCE_MODE_XTS) {
				key_size = creq->encklen/2;
				enck_size_in_word = key_size/sizeof(uint32_t);
			} else {
				key_size = creq->encklen;
			}

			switch (key_size) {
			case AES128_KEY_SIZE:
				encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
							CRYPTO_ENCR_KEY_SZ);
				break;
			case AES256_KEY_SIZE:
			default:
				encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES256 <<
							CRYPTO_ENCR_KEY_SZ);

				/* check for null key. If null, use hw key */
				for (i = 0; i < enck_size_in_word; i++) {
					if (enckey32[i] != 0)
						break;
				}
				if (i == enck_size_in_word)
					encr_cfg |= 1 << CRYPTO_USE_HW_KEY;
				break;
			} /* end of switch (key_size) */

			encr_cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
			if (pce_dev->cipher_key_size != creq->encklen)
				aes_key_chg = 1;
			else {
				for (i = 0; i < enck_size_in_word; i++) {
					if (enckey32[i]
						!= pce_dev->cipher_key[i])
						break;
				}
				aes_key_chg = (i == enck_size_in_word) ? 0 : 1;
			}

			if (aes_key_chg) {
				for (i = 0; i < enck_size_in_word; i++)
					writel_relaxed(enckey32[i],
						pce_dev->iobase +
						CRYPTO_ENCR_KEY0_REG +
						(i * sizeof(uint32_t)));
				pce_dev->cipher_key_size = creq->encklen;
				for (i = 0; i < enck_size_in_word; i++)
					pce_dev->cipher_key[i] = enckey32[i];
			} /* end of if (aes_key_chg) */
		} /* end of else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
		break;
	} /* end of switch (creq->alg) */

	/* set the encode/decode direction bit */
	encr_cfg |= ((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;

	/* write encr seg cfg */
	writel_relaxed(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);

	/* write encr seg size */
	if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT))
		writel_relaxed((creq->cryptlen + creq->authsize),
			pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
	else
		writel_relaxed(creq->cryptlen,
			pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
	/* write encr seg start */
	writel_relaxed((coffset & 0xffff),
			pce_dev->iobase + CRYPTO_ENCR_SEG_START_REG);
	/* write seg size */
	writel_relaxed(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
	/*
	 * Ensure the previous writes setting up the CE registers have
	 * completed before writing to the GO register.
	 */
	dsb();
	/* issue go to crypto */
	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
	/*
	 * Ensure the previous write to the GO register has completed
	 * before issuing a DMA transfer request.
	 */
	dsb();
	return 0;
}
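
/*
 * Worked example for the CCM MAC-length encoding used above, assuming
 * the standard CCM tag sizes: the ((creq->authsize >> 1) - 2)
 * expression maps tags of 4, 6, 8, 10, 12, 14 and 16 bytes to the
 * field values 0 through 6 written into CRYPTO_AUTH_SIZE.
 */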

static int _aead_complete(struct qce_device *pce_dev)
{
	struct aead_request *areq;
	int i;
	uint32_t ivsize;
	uint32_t iv_out[4];
	unsigned char iv[4 * sizeof(uint32_t)];

	areq = (struct aead_request *) pce_dev->areq;
	ivsize = pce_dev->ivsize;

	if (areq->src != areq->dst) {
		dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
				DMA_FROM_DEVICE);
	}
	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
							DMA_TO_DEVICE);

	if (pce_dev->mode != QCE_MODE_CCM)
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
				ivsize, DMA_TO_DEVICE);
	dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
			DMA_TO_DEVICE);

	/* get iv out */
	if ((pce_dev->mode == QCE_MODE_ECB) ||
			(pce_dev->mode == QCE_MODE_CCM)) {
		if (pce_dev->mode == QCE_MODE_CCM) {
			int result;

			result = readl_relaxed(pce_dev->iobase +
						CRYPTO_STATUS_REG);
			result &= (1 << CRYPTO_MAC_FAILED);
			result |= (pce_dev->chan_ce_in_status |
						pce_dev->chan_ce_out_status);
			dsb();
			pce_dev->qce_cb(areq, pce_dev->dig_result, NULL,
							result);
		} else {
			pce_dev->qce_cb(areq, pce_dev->dig_result, NULL,
					pce_dev->chan_ce_in_status |
					pce_dev->chan_ce_out_status);
		}
	} else {
		for (i = 0; i < 4; i++)
			iv_out[i] = readl_relaxed(pce_dev->iobase +
				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)));

		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
		pce_dev->qce_cb(areq, pce_dev->dig_result, iv,
				pce_dev->chan_ce_in_status |
				pce_dev->chan_ce_out_status);
	}
	return 0;
}

static void _sha_complete(struct qce_device *pce_dev)
{
	struct ahash_request *areq;
	uint32_t auth_data[4];
	uint32_t digest[8];
	int i;

	areq = (struct ahash_request *) pce_dev->areq;
	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
				DMA_TO_DEVICE);

	for (i = 0; i < 4; i++)
		auth_data[i] = readl_relaxed(pce_dev->iobase +
					(CRYPTO_AUTH_BYTECNT0_REG +
						i * sizeof(uint32_t)));

	for (i = 0; i < 8; i++)
		digest[i] = readl_relaxed(pce_dev->iobase +
			CRYPTO_AUTH_IV0_REG + (i * sizeof(uint32_t)));

	_net_words_to_byte_stream(digest, pce_dev->dig_result,
					SHA256_DIGEST_SIZE);

	pce_dev->qce_cb(areq, pce_dev->dig_result, (unsigned char *)auth_data,
				pce_dev->chan_ce_in_status);
}

static int _ablk_cipher_complete(struct qce_device *pce_dev)
{
	struct ablkcipher_request *areq;
	uint32_t iv_out[4];
	unsigned char iv[4 * sizeof(uint32_t)];

	areq = (struct ablkcipher_request *) pce_dev->areq;

	if (areq->src != areq->dst) {
		dma_unmap_sg(pce_dev->pdev, areq->dst,
			pce_dev->dst_nents, DMA_FROM_DEVICE);
	}
	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
		(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
						DMA_TO_DEVICE);
	/* get iv out */
	if (pce_dev->mode == QCE_MODE_ECB) {
		pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status |
					pce_dev->chan_ce_out_status);
	} else {
		int i;

		for (i = 0; i < 4; i++)
			iv_out[i] = readl_relaxed(pce_dev->iobase +
				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)));

		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
		pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status |
					pce_dev->chan_ce_out_status);
	}

	return 0;
}

static int _ablk_cipher_use_pmem_complete(struct qce_device *pce_dev)
{
	struct ablkcipher_request *areq;
	uint32_t iv_out[4];
	unsigned char iv[4 * sizeof(uint32_t)];

	areq = (struct ablkcipher_request *) pce_dev->areq;

	/* get iv out */
	if (pce_dev->mode == QCE_MODE_ECB) {
		pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status |
					pce_dev->chan_ce_out_status);
	} else {
		int i;

		for (i = 0; i < 4; i++)
			iv_out[i] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR0_IV0_REG + (i * sizeof(uint32_t)));

		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
		pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status |
					pce_dev->chan_ce_out_status);
	}

	return 0;
}

static int qce_split_and_insert_dm_desc(struct dmov_desc *pdesc,
		unsigned int plen, unsigned int paddr, int *index)
{
	while (plen > 0x8000) {
		pdesc->len = 0x8000;
		if (paddr > 0) {
			pdesc->addr = paddr;
			paddr += 0x8000;
		}
		plen -= pdesc->len;
		if (plen > 0) {
			*index = (*index) + 1;
			if ((*index) >= QCE_MAX_NUM_DESC)
				return -ENOMEM;
			pdesc++;
		}
	}
	if ((plen > 0) && (plen <= 0x8000)) {
		pdesc->len = plen;
		if (paddr > 0)
			pdesc->addr = paddr;
	}

	return 0;
}
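
/*
 * Illustrative sketch (hypothetical QCE_EXAMPLE_SNIPPETS guard, so it
 * is normally compiled out): descriptors are capped at 0x8000 bytes
 * here, so a 0x19000-byte transfer is split into four descriptors of
 * 0x8000, 0x8000, 0x8000 and 0x1000 bytes. The physical address
 * 0x20000000 is a made-up example value.
 */
#ifdef QCE_EXAMPLE_SNIPPETS
static void example_split_desc(struct qce_device *pce_dev)
{
	int index = 0;

	/* fills four descriptors and advances index from 0 to 3 */
	qce_split_and_insert_dm_desc(pce_dev->ce_in_src_desc,
					0x19000, 0x20000000, &index);
}
#endif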

static int _chain_sg_buffer_in(struct qce_device *pce_dev,
		struct scatterlist *sg, unsigned int nbytes)
{
	unsigned int len;
	unsigned int dlen;
	struct dmov_desc *pdesc;

	pdesc = pce_dev->ce_in_dst_desc + pce_dev->ce_in_dst_desc_index;
	if (nbytes > 0x8000)
		qce_split_and_insert_dm_desc(pdesc, nbytes, 0,
				&pce_dev->ce_in_dst_desc_index);
	else
		pdesc->len = nbytes;

	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
	/*
	 * Two consecutive chunks may be merged into the same
	 * buffer descriptor.
	 */
	while (nbytes > 0) {
		len = min(nbytes, sg_dma_len(sg));
		dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
		nbytes -= len;
		if (dlen == 0) {
			pdesc->addr = sg_dma_address(sg);
			pdesc->len = len;
			if (pdesc->len > 0x8000)
				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
					sg_dma_address(sg),
					&pce_dev->ce_in_src_desc_index);
		} else if (sg_dma_address(sg) == (pdesc->addr + dlen)) {
			pdesc->len = dlen + len;
			if (pdesc->len > 0x8000)
				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
					pdesc->addr,
					&pce_dev->ce_in_src_desc_index);
		} else {
			pce_dev->ce_in_src_desc_index++;
			if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
				return -ENOMEM;
			pdesc++;
			pdesc->len = len;
			pdesc->addr = sg_dma_address(sg);
			if (pdesc->len > 0x8000)
				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
					sg_dma_address(sg),
					&pce_dev->ce_in_src_desc_index);
		}
		if (nbytes > 0)
			sg = sg_next(sg);
	}
	return 0;
}

static int _chain_pm_buffer_in(struct qce_device *pce_dev,
		unsigned int pmem, unsigned int nbytes)
{
	unsigned int dlen;
	struct dmov_desc *pdesc;

	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
	dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
	if (dlen == 0) {
		pdesc->addr = pmem;
		pdesc->len = nbytes;
	} else if (pmem == (pdesc->addr + dlen)) {
		pdesc->len = dlen + nbytes;
	} else {
		pce_dev->ce_in_src_desc_index++;
		if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
			return -ENOMEM;
		pdesc++;
		pdesc->len = nbytes;
		pdesc->addr = pmem;
	}
	pdesc = pce_dev->ce_in_dst_desc + pce_dev->ce_in_dst_desc_index;
	pdesc->len += nbytes;

	return 0;
}

static void _chain_buffer_in_init(struct qce_device *pce_dev)
{
	struct dmov_desc *pdesc;

	pce_dev->ce_in_src_desc_index = 0;
	pce_dev->ce_in_dst_desc_index = 0;
	pdesc = pce_dev->ce_in_src_desc;
	pdesc->len = 0;
}

static void _ce_in_final(struct qce_device *pce_dev, unsigned total)
{
	struct dmov_desc *pdesc;
	dmov_sg *pcmd;

	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
	pdesc->len |= ADM_DESC_LAST;
	pdesc = pce_dev->ce_in_dst_desc + pce_dev->ce_in_dst_desc_index;
	pdesc->len |= ADM_DESC_LAST;

	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in;
	pcmd->cmd |= CMD_LC;
}

#ifdef QCE_DEBUG
static void _ce_in_dump(struct qce_device *pce_dev)
{
	int i;
	struct dmov_desc *pdesc;

	dev_info(pce_dev->pdev, "_ce_in_dump: src\n");
	for (i = 0; i <= pce_dev->ce_in_src_desc_index; i++) {
		pdesc = pce_dev->ce_in_src_desc + i;
		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
				pdesc->len);
	}
	dev_info(pce_dev->pdev, "_ce_in_dump: dst\n");
	for (i = 0; i <= pce_dev->ce_in_dst_desc_index; i++) {
		pdesc = pce_dev->ce_in_dst_desc + i;
		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
				pdesc->len);
	}
}

static void _ce_out_dump(struct qce_device *pce_dev)
{
	int i;
	struct dmov_desc *pdesc;

	dev_info(pce_dev->pdev, "_ce_out_dump: src\n");
	for (i = 0; i <= pce_dev->ce_out_src_desc_index; i++) {
		pdesc = pce_dev->ce_out_src_desc + i;
		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
				pdesc->len);
	}

	dev_info(pce_dev->pdev, "_ce_out_dump: dst\n");
	for (i = 0; i <= pce_dev->ce_out_dst_desc_index; i++) {
		pdesc = pce_dev->ce_out_dst_desc + i;
		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
				pdesc->len);
	}
}

#else

static void _ce_in_dump(struct qce_device *pce_dev)
{
}

static void _ce_out_dump(struct qce_device *pce_dev)
{
}

#endif

static int _chain_sg_buffer_out(struct qce_device *pce_dev,
		struct scatterlist *sg, unsigned int nbytes)
{
	unsigned int len;
	unsigned int dlen;
	struct dmov_desc *pdesc;

	pdesc = pce_dev->ce_out_src_desc + pce_dev->ce_out_src_desc_index;
	if (nbytes > 0x8000)
		qce_split_and_insert_dm_desc(pdesc, nbytes, 0,
				&pce_dev->ce_out_src_desc_index);
	else
		pdesc->len = nbytes;

	pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
	/*
	 * Two consecutive chunks may be merged into the same
	 * buffer descriptor.
	 */
	while (nbytes > 0) {
		len = min(nbytes, sg_dma_len(sg));
		dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
		nbytes -= len;
		if (dlen == 0) {
			pdesc->addr = sg_dma_address(sg);
			pdesc->len = len;
			if (pdesc->len > 0x8000)
				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
					sg_dma_address(sg),
					&pce_dev->ce_out_dst_desc_index);
		} else if (sg_dma_address(sg) == (pdesc->addr + dlen)) {
			pdesc->len = dlen + len;
			if (pdesc->len > 0x8000)
				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
					pdesc->addr,
					&pce_dev->ce_out_dst_desc_index);
		} else {
			pce_dev->ce_out_dst_desc_index++;
			if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
				return -EIO;
			pdesc++;
			pdesc->len = len;
			pdesc->addr = sg_dma_address(sg);
			if (pdesc->len > 0x8000)
				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
					sg_dma_address(sg),
					&pce_dev->ce_out_dst_desc_index);
		}
		if (nbytes > 0)
			sg = sg_next(sg);
	}
	return 0;
}

static int _chain_pm_buffer_out(struct qce_device *pce_dev,
		unsigned int pmem, unsigned int nbytes)
{
	unsigned int dlen;
	struct dmov_desc *pdesc;

	pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
	dlen = pdesc->len & ADM_DESC_LENGTH_MASK;

	if (dlen == 0) {
		pdesc->addr = pmem;
		pdesc->len = nbytes;
	} else if (pmem == (pdesc->addr + dlen)) {
		pdesc->len = dlen + nbytes;
	} else {
		pce_dev->ce_out_dst_desc_index++;
		if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
			return -EIO;
		pdesc++;
		pdesc->len = nbytes;
		pdesc->addr = pmem;
	}
	pdesc = pce_dev->ce_out_src_desc + pce_dev->ce_out_src_desc_index;
	pdesc->len += nbytes;

	return 0;
}

static void _chain_buffer_out_init(struct qce_device *pce_dev)
{
	struct dmov_desc *pdesc;

	pce_dev->ce_out_dst_desc_index = 0;
	pce_dev->ce_out_src_desc_index = 0;
	pdesc = pce_dev->ce_out_dst_desc;
	pdesc->len = 0;
}

static void _ce_out_final(struct qce_device *pce_dev, unsigned total)
{
	struct dmov_desc *pdesc;
	dmov_sg *pcmd;

	pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
	pdesc->len |= ADM_DESC_LAST;
	pdesc = pce_dev->ce_out_src_desc + pce_dev->ce_out_src_desc_index;
	pdesc->len |= ADM_DESC_LAST;
	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out;
	pcmd->cmd |= CMD_LC;
}

static void _aead_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
							result);
		pce_dev->chan_ce_in_status = -1;
	} else {
		pce_dev->chan_ce_in_status = 0;
	}

	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_aead_complete(pce_dev);
	}
}

static void _aead_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
							result);
		pce_dev->chan_ce_out_status = -1;
	} else {
		pce_dev->chan_ce_out_status = 0;
	}

	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_aead_complete(pce_dev);
	}
}

static void _sha_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
							result);
		pce_dev->chan_ce_in_status = -1;
	} else {
		pce_dev->chan_ce_in_status = 0;
	}
	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
	_sha_complete(pce_dev);
}

static void _ablk_cipher_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
							result);
		pce_dev->chan_ce_in_status = -1;
	} else {
		pce_dev->chan_ce_in_status = 0;
	}

	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_ablk_cipher_complete(pce_dev);
	}
}

static void _ablk_cipher_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
							result);
		pce_dev->chan_ce_out_status = -1;
	} else {
		pce_dev->chan_ce_out_status = 0;
	}

	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_ablk_cipher_complete(pce_dev);
	}
}

static void _ablk_cipher_ce_in_call_back_pmem(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
							result);
		pce_dev->chan_ce_in_status = -1;
	} else {
		pce_dev->chan_ce_in_status = 0;
	}

	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_ablk_cipher_use_pmem_complete(pce_dev);
	}
}

static void _ablk_cipher_ce_out_call_back_pmem(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
							result);
		pce_dev->chan_ce_out_status = -1;
	} else {
		pce_dev->chan_ce_out_status = 0;
	}

	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_ablk_cipher_use_pmem_complete(pce_dev);
	}
}

static int _setup_cmd_template(struct qce_device *pce_dev)
{
	dmov_sg *pcmd;
	struct dmov_desc *pdesc;
	unsigned char *vaddr;
	int i = 0;

	/* Divide up the two pages of coherent memory */

	/* 1. ce_in channel 1st command src descriptors, 128 entries */
	vaddr = pce_dev->coh_vmem;
	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
	pce_dev->ce_in_src_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_in_src_desc = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);

	/* 2. ce_in channel 1st command dst descriptors, 128 entries */
	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
	pce_dev->ce_in_dst_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_in_dst_desc = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);

	/* 3. ce_in channel command list of one scatter gather command */
	pce_dev->cmd_list_ce_in = vaddr;
	pce_dev->phy_cmd_list_ce_in = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(dmov_sg);

	/* 4. authentication result */
	pce_dev->dig_result = vaddr;
	pce_dev->phy_dig_result = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + SHA256_DIGESTSIZE;

	/* 5. ce_out channel command list of one scatter gather command */
	pce_dev->cmd_list_ce_out = vaddr;
	pce_dev->phy_cmd_list_ce_out = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(dmov_sg);

	/* 6. ce_out channel command src descriptors, 128 entries */
	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
	pce_dev->ce_out_src_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_out_src_desc = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);

	/* 7. ce_out channel command dst descriptors, 128 entries */
	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
	pce_dev->ce_out_dst_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_out_dst_desc = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);

	/* 8. pad area */
	pce_dev->ce_pad = vaddr;
	pce_dev->phy_ce_pad = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);

	/* Pad area is twice the block size to cover the AES-CCM worst case */
	vaddr = vaddr + 2 * ADM_CE_BLOCK_SIZE;

	/* 9. ce_in channel command pointer list */
	vaddr = (unsigned char *) ALIGN(((unsigned int) vaddr), 8);
	pce_dev->cmd_pointer_list_ce_in = (unsigned int *) vaddr;
	pce_dev->phy_cmd_pointer_list_ce_in = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(unsigned char *);

	/* 10. ce_out channel command pointer list */
	vaddr = (unsigned char *) ALIGN(((unsigned int) vaddr), 8);
	pce_dev->cmd_pointer_list_ce_out = (unsigned int *) vaddr;
	pce_dev->phy_cmd_pointer_list_ce_out = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(unsigned char *);

	/* 11. throw-away area to store bypass data from ce_out */
	pce_dev->ce_out_ignore = (unsigned char *) vaddr;
	pce_dev->phy_ce_out_ignore = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	/* remainder of the two-page allocation; at least 1.5K of space */
	pce_dev->ce_out_ignore_size = (2 * PAGE_SIZE) - (vaddr -
			pce_dev->coh_vmem);
	/*
	 * The first command of command list ce_in is for the input of
	 * concurrent operation of encrypt/decrypt or for the input
	 * of authentication.
	 */
	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in;
	/* swap byte and half word, dst crci, scatter gather */
	pcmd->cmd = CMD_DST_SWAP_BYTES | CMD_DST_SWAP_SHORTS |
			CMD_DST_CRCI(pce_dev->crci_in) | CMD_MODE_SG;
	pdesc = pce_dev->ce_in_src_desc;
	pdesc->addr = 0;	/* to be filled in each operation */
	pdesc->len = 0;		/* to be filled in each operation */
	pcmd->src_dscr = (unsigned) pce_dev->phy_ce_in_src_desc;

	pdesc = pce_dev->ce_in_dst_desc;
	for (i = 0; i < QCE_MAX_NUM_DESC; i++) {
		pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase);
		pdesc->len = 0;	/* to be filled in each operation */
		pdesc++;
	}
	pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_in_dst_desc;
	pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) |
						DST_INDEX_SG_CMD(0);
	pcmd++;

	/* setup command pointer list */
	*(pce_dev->cmd_pointer_list_ce_in) = (CMD_PTR_LP | DMOV_CMD_LIST |
			DMOV_CMD_ADDR((unsigned int)
					pce_dev->phy_cmd_list_ce_in));
	pce_dev->chan_ce_in_cmd->user = (void *) pce_dev;
	pce_dev->chan_ce_in_cmd->exec_func = NULL;
	pce_dev->chan_ce_in_cmd->cmdptr = DMOV_CMD_ADDR(
			(unsigned int) pce_dev->phy_cmd_pointer_list_ce_in);
	pce_dev->chan_ce_in_cmd->crci_mask = msm_dmov_build_crci_mask(1,
			pce_dev->crci_in);

	/*
	 * The first command in the command list ce_out.
	 * It is for encrypt/decrypt output.
	 * If hashing only, ce_out is not used.
	 */
	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out;
	/* swap byte, half word, source crci, scatter gather */
	pcmd->cmd = CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
			CMD_SRC_CRCI(pce_dev->crci_out) | CMD_MODE_SG;

	pdesc = pce_dev->ce_out_src_desc;
	for (i = 0; i < QCE_MAX_NUM_DESC; i++) {
		pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase);
		pdesc->len = 0;	/* to be filled in each operation */
		pdesc++;
	}
	pcmd->src_dscr = (unsigned) pce_dev->phy_ce_out_src_desc;

	pdesc = pce_dev->ce_out_dst_desc;
	pdesc->addr = 0;	/* to be filled in each operation */
	pdesc->len = 0;		/* to be filled in each operation */
	pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_out_dst_desc;
	pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) |
						DST_INDEX_SG_CMD(0);

	pcmd++;

	/* setup command pointer list */
	*(pce_dev->cmd_pointer_list_ce_out) = (CMD_PTR_LP | DMOV_CMD_LIST |
			DMOV_CMD_ADDR((unsigned int)pce_dev->
						phy_cmd_list_ce_out));

	pce_dev->chan_ce_out_cmd->user = pce_dev;
	pce_dev->chan_ce_out_cmd->exec_func = NULL;
	pce_dev->chan_ce_out_cmd->cmdptr = DMOV_CMD_ADDR(
			(unsigned int) pce_dev->phy_cmd_pointer_list_ce_out);
	pce_dev->chan_ce_out_cmd->crci_mask = msm_dmov_build_crci_mask(1,
			pce_dev->crci_out);

	return 0;
}
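
/*
 * Sketch of the coherent-memory carve-up performed above; offsets are
 * approximate because of the ALIGN() rounding, and sizes assume the
 * structures defined in this file:
 *
 *   ce_in  src descriptors    128 * sizeof(struct dmov_desc)
 *   ce_in  dst descriptors    128 * sizeof(struct dmov_desc)
 *   ce_in  dmov_sg command    sizeof(dmov_sg)
 *   digest result             SHA256_DIGESTSIZE
 *   ce_out dmov_sg command    sizeof(dmov_sg)
 *   ce_out src descriptors    128 * sizeof(struct dmov_desc)
 *   ce_out dst descriptors    128 * sizeof(struct dmov_desc)
 *   pad area                  2 * ADM_CE_BLOCK_SIZE
 *   command pointer lists     two 8-byte-aligned pointers
 *   ce_out ignore buffer      remainder of the two-page allocation
 */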

static int _qce_start_dma(struct qce_device *pce_dev, bool ce_in, bool ce_out)
{
	if (ce_in)
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IN_PROG;
	else
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;

	if (ce_out)
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IN_PROG;
	else
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;

	if (ce_in)
		msm_dmov_enqueue_cmd(pce_dev->chan_ce_in,
					pce_dev->chan_ce_in_cmd);
	if (ce_out)
		msm_dmov_enqueue_cmd(pce_dev->chan_ce_out,
					pce_dev->chan_ce_out_cmd);

	return 0;
}

int qce_aead_req(void *handle, struct qce_req *q_req)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	struct aead_request *areq = (struct aead_request *) q_req->areq;
	uint32_t authsize = q_req->authsize;
	uint32_t totallen_in, totallen_out, out_len;
	uint32_t pad_len_in, pad_len_out;
	uint32_t pad_mac_len_out, pad_ptx_len_out;
	int rc = 0;

	if (q_req->dir == QCE_ENCRYPT) {
		q_req->cryptlen = areq->cryptlen;
		totallen_in = q_req->cryptlen + areq->assoclen;
		totallen_out = q_req->cryptlen + authsize + areq->assoclen;
		out_len = areq->cryptlen + authsize;
		pad_len_in = ALIGN(totallen_in, ADM_CE_BLOCK_SIZE) -
								totallen_in;
		pad_mac_len_out = ALIGN(authsize, ADM_CE_BLOCK_SIZE) -
								authsize;
		pad_ptx_len_out = ALIGN(q_req->cryptlen, ADM_CE_BLOCK_SIZE) -
							q_req->cryptlen;
		pad_len_out = pad_ptx_len_out + pad_mac_len_out;
		totallen_out += pad_len_out;
	} else {
		q_req->cryptlen = areq->cryptlen - authsize;
		totallen_in = areq->cryptlen + areq->assoclen;
		totallen_out = q_req->cryptlen + areq->assoclen;
		out_len = areq->cryptlen - authsize;
		pad_len_in = ALIGN(areq->cryptlen, ADM_CE_BLOCK_SIZE) -
							areq->cryptlen;
		pad_len_out = pad_len_in + authsize;
		totallen_out += pad_len_out;
	}

	_chain_buffer_in_init(pce_dev);
	_chain_buffer_out_init(pce_dev);

	pce_dev->assoc_nents = 0;
	pce_dev->src_nents = 0;
	pce_dev->dst_nents = 0;
	pce_dev->ivsize = q_req->ivsize;
	pce_dev->authsize = q_req->authsize;

	/* associated data input */
	pce_dev->assoc_nents = count_sg(areq->assoc, areq->assoclen);
	dma_map_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
					DMA_TO_DEVICE);
	if (_chain_sg_buffer_in(pce_dev, areq->assoc, areq->assoclen) < 0) {
		rc = -ENOMEM;
		goto bad;
	}
	/* cipher input */
	pce_dev->src_nents = count_sg(areq->src, areq->cryptlen);
	dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
							DMA_TO_DEVICE);
	if (_chain_sg_buffer_in(pce_dev, areq->src, areq->cryptlen) < 0) {
		rc = -ENOMEM;
		goto bad;
	}
	/* pad data in */
	if (pad_len_in) {
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
						pad_len_in) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* ignore associated data */
	if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_out_ignore,
					areq->assoclen) < 0) {
		rc = -ENOMEM;
		goto bad;
	}
	/* cipher + mac output for encryption */
	if (areq->src != areq->dst) {
		pce_dev->dst_nents = count_sg(areq->dst, out_len);
		dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
				DMA_FROM_DEVICE);
	}
	if (_chain_sg_buffer_out(pce_dev, areq->dst, out_len) < 0) {
		rc = -ENOMEM;
		goto bad;
	}
	/* pad data out */
	if (pad_len_out) {
		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
						pad_len_out) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* finalize the ce_in and ce_out channels command lists */
	_ce_in_final(pce_dev, ALIGN(totallen_in, ADM_CE_BLOCK_SIZE));
	_ce_out_final(pce_dev, ALIGN(totallen_out, ADM_CE_BLOCK_SIZE));

	/* set up crypto device */
	rc = _ce_setup_cipher(pce_dev, q_req, totallen_in, areq->assoclen);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = q_req->areq;
	pce_dev->qce_cb = q_req->qce_cb;

	pce_dev->chan_ce_in_cmd->complete_func = _aead_ce_in_call_back;
	pce_dev->chan_ce_out_cmd->complete_func = _aead_ce_out_call_back;

	_ce_in_dump(pce_dev);
	_ce_out_dump(pce_dev);

	rc = _qce_start_dma(pce_dev, true, true);
	if (rc == 0)
		return 0;
bad:
	if (pce_dev->assoc_nents) {
		dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
				DMA_TO_DEVICE);
	}

	if (pce_dev->src_nents) {
		dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
				(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
								DMA_TO_DEVICE);
	}
	if (pce_dev->dst_nents) {
		dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
				DMA_FROM_DEVICE);
	}
	return rc;
}
EXPORT_SYMBOL(qce_aead_req);
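
/*
 * Worked padding example for qce_aead_req(), assuming a CCM encrypt
 * request with cryptlen = 20, assoclen = 8 and authsize = 16:
 *
 *   totallen_in     = 20 + 8 = 28
 *   pad_len_in      = ALIGN(28, 16) - 28 = 4
 *   pad_mac_len_out = ALIGN(16, 16) - 16 = 0
 *   pad_ptx_len_out = ALIGN(20, 16) - 20 = 12
 *   totallen_out    = 20 + 16 + 8 + 12 = 56
 *
 * so the ce_in channel is finalized at ALIGN(28, 16) = 32 bytes and
 * the ce_out channel at ALIGN(56, 16) = 64 bytes, with the excess
 * flowing to the pad and ignore buffers.
 */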

int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
{
	int rc = 0;
	struct qce_device *pce_dev = (struct qce_device *) handle;
	struct ablkcipher_request *areq = (struct ablkcipher_request *)
						c_req->areq;
	uint32_t pad_len = ALIGN(areq->nbytes, ADM_CE_BLOCK_SIZE)
						- areq->nbytes;

	_chain_buffer_in_init(pce_dev);
	_chain_buffer_out_init(pce_dev);

	pce_dev->src_nents = 0;
	pce_dev->dst_nents = 0;

	/* cipher input */
	pce_dev->src_nents = count_sg(areq->src, areq->nbytes);

	if (c_req->use_pmem != 1)
		dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
							DMA_TO_DEVICE);
	else
		dma_map_pmem_sg(&c_req->pmem->src[0], pce_dev->src_nents,
							areq->src);

	if (_chain_sg_buffer_in(pce_dev, areq->src, areq->nbytes) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* cipher output */
	if (areq->src != areq->dst) {
		pce_dev->dst_nents = count_sg(areq->dst, areq->nbytes);
		if (c_req->use_pmem != 1)
			dma_map_sg(pce_dev->pdev, areq->dst,
					pce_dev->dst_nents, DMA_FROM_DEVICE);
		else
			dma_map_pmem_sg(&c_req->pmem->dst[0],
					pce_dev->dst_nents, areq->dst);
	}
	if (_chain_sg_buffer_out(pce_dev, areq->dst, areq->nbytes) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* pad data */
	if (pad_len) {
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
						pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
						pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* finalize the ce_in and ce_out channels command lists */
	_ce_in_final(pce_dev, areq->nbytes + pad_len);
	_ce_out_final(pce_dev, areq->nbytes + pad_len);

	_ce_in_dump(pce_dev);
	_ce_out_dump(pce_dev);

	/* set up crypto device */
	rc = _ce_setup_cipher(pce_dev, c_req, areq->nbytes, 0);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = areq;
	pce_dev->qce_cb = c_req->qce_cb;
	if (c_req->use_pmem == 1) {
		pce_dev->chan_ce_in_cmd->complete_func =
					_ablk_cipher_ce_in_call_back_pmem;
		pce_dev->chan_ce_out_cmd->complete_func =
					_ablk_cipher_ce_out_call_back_pmem;
	} else {
		pce_dev->chan_ce_in_cmd->complete_func =
					_ablk_cipher_ce_in_call_back;
		pce_dev->chan_ce_out_cmd->complete_func =
					_ablk_cipher_ce_out_call_back;
	}
	rc = _qce_start_dma(pce_dev, true, true);

	if (rc == 0)
		return 0;
bad:
	if (c_req->use_pmem != 1) {
		if (pce_dev->dst_nents) {
			dma_unmap_sg(pce_dev->pdev, areq->dst,
					pce_dev->dst_nents, DMA_FROM_DEVICE);
		}
		if (pce_dev->src_nents) {
			dma_unmap_sg(pce_dev->pdev, areq->src,
					pce_dev->src_nents,
					(areq->src == areq->dst) ?
						DMA_BIDIRECTIONAL :
						DMA_TO_DEVICE);
		}
	}
	return rc;
}
EXPORT_SYMBOL(qce_ablk_cipher_req);

int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	int rc;
	uint32_t pad_len = ALIGN(sreq->size, ADM_CE_BLOCK_SIZE) - sreq->size;
	struct ahash_request *areq = (struct ahash_request *)sreq->areq;

	_chain_buffer_in_init(pce_dev);
	pce_dev->src_nents = count_sg(sreq->src, sreq->size);
	dma_map_sg(pce_dev->pdev, sreq->src, pce_dev->src_nents,
							DMA_TO_DEVICE);

	if (_chain_sg_buffer_in(pce_dev, sreq->src, sreq->size) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	if (pad_len) {
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
						pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}
	_ce_in_final(pce_dev, sreq->size + pad_len);

	_ce_in_dump(pce_dev);

	rc = _ce_setup_hash(pce_dev, sreq);
	if (rc < 0)
		goto bad;

	pce_dev->areq = areq;
	pce_dev->qce_cb = sreq->qce_cb;
	pce_dev->chan_ce_in_cmd->complete_func = _sha_ce_in_call_back;

	rc = _qce_start_dma(pce_dev, true, false);

	if (rc == 0)
		return 0;
bad:
	if (pce_dev->src_nents) {
		dma_unmap_sg(pce_dev->pdev, sreq->src,
				pce_dev->src_nents, DMA_TO_DEVICE);
	}

	return rc;
}
EXPORT_SYMBOL(qce_process_sha_req);
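
/*
 * Illustrative sketch (hypothetical QCE_EXAMPLE_SNIPPETS guard, so it
 * is normally compiled out): a single-shot SHA-256 request built with
 * only the qce_sha_req fields this driver reads. The caller, callback
 * and scatterlist are assumptions; error handling is elided.
 */
#ifdef QCE_EXAMPLE_SNIPPETS
static int example_sha256_digest(void *handle, struct scatterlist *sg,
		unsigned int len, qce_comp_func_ptr_t cb)
{
	struct qce_sha_req sreq;

	memset(&sreq, 0, sizeof(sreq));
	sreq.alg = QCE_HASH_SHA256;
	sreq.first_blk = 1;	/* start from the standard IV */
	sreq.last_blk = 1;	/* single and final block */
	sreq.src = sg;
	sreq.size = len;
	sreq.qce_cb = cb;
	return qce_process_sha_req(handle, &sreq);
}
#endif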

/* crypto engine open function. */
void *qce_open(struct platform_device *pdev, int *rc)
{
	struct qce_device *pce_dev;
	struct resource *resource;
	struct clk *ce_core_clk;
	struct clk *ce_clk;

	pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
	if (!pce_dev) {
		*rc = -ENOMEM;
		dev_err(&pdev->dev, "Cannot allocate memory\n");
		return NULL;
	}
	pce_dev->pdev = &pdev->dev;

	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!resource) {
		*rc = -ENXIO;
		dev_err(pce_dev->pdev, "Missing MEM resource\n");
		goto err_pce_dev;
	}
	pce_dev->phy_iobase = resource->start;
	pce_dev->iobase = ioremap_nocache(resource->start,
				resource->end - resource->start + 1);
	if (!pce_dev->iobase) {
		*rc = -ENOMEM;
		dev_err(pce_dev->pdev, "Cannot map io memory\n");
		goto err_pce_dev;
	}

	pce_dev->chan_ce_in_cmd = kzalloc(sizeof(struct msm_dmov_cmd),
			GFP_KERNEL);
	pce_dev->chan_ce_out_cmd = kzalloc(sizeof(struct msm_dmov_cmd),
			GFP_KERNEL);
	if (pce_dev->chan_ce_in_cmd == NULL ||
			pce_dev->chan_ce_out_cmd == NULL) {
		dev_err(pce_dev->pdev, "Cannot allocate memory\n");
		*rc = -ENOMEM;
		goto err_dm_chan_cmd;
	}

	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
					"crypto_channels");
	if (!resource) {
		*rc = -ENXIO;
		dev_err(pce_dev->pdev, "Missing DMA channel resource\n");
		goto err_dm_chan_cmd;
	}
	pce_dev->chan_ce_in = resource->start;
	pce_dev->chan_ce_out = resource->end;
	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
					"crypto_crci_in");
	if (!resource) {
		*rc = -ENXIO;
		dev_err(pce_dev->pdev, "Missing DMA crci in resource\n");
		goto err_dm_chan_cmd;
	}
	pce_dev->crci_in = resource->start;
	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
					"crypto_crci_out");
	if (!resource) {
		*rc = -ENXIO;
		dev_err(pce_dev->pdev, "Missing DMA crci out resource\n");
		goto err_dm_chan_cmd;
	}
	pce_dev->crci_out = resource->start;

	pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
			2*PAGE_SIZE, &pce_dev->coh_pmem, GFP_KERNEL);
	if (pce_dev->coh_vmem == NULL) {
		*rc = -ENOMEM;
		dev_err(pce_dev->pdev, "Cannot allocate coherent memory\n");
		goto err;
	}

	/* Get CE core clk */
	ce_core_clk = clk_get(pce_dev->pdev, "ce_clk");
	if (IS_ERR(ce_core_clk)) {
		*rc = PTR_ERR(ce_core_clk);
		goto err;
	}
	pce_dev->ce_core_clk = ce_core_clk;
	/* Get CE clk */
	ce_clk = clk_get(pce_dev->pdev, "ce_pclk");
	if (IS_ERR(ce_clk)) {
		*rc = PTR_ERR(ce_clk);
		clk_put(pce_dev->ce_core_clk);
		goto err;
	}
	pce_dev->ce_clk = ce_clk;

	/* Enable CE core clk */
	*rc = clk_enable(pce_dev->ce_core_clk);
	if (*rc) {
		clk_put(pce_dev->ce_core_clk);
		clk_put(pce_dev->ce_clk);
		goto err;
	}
	/* Enable CE clk */
	*rc = clk_enable(pce_dev->ce_clk);
	if (*rc) {
		clk_disable(pce_dev->ce_core_clk);
		clk_put(pce_dev->ce_core_clk);
		clk_put(pce_dev->ce_clk);
		goto err;
	}
	_setup_cmd_template(pce_dev);

	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

	if (_init_ce_engine(pce_dev)) {
		*rc = -ENXIO;
		goto err;
	}
	*rc = 0;
	return pce_dev;

err:
	/* free the same two-page size that was allocated above */
	if (pce_dev->coh_vmem)
		dma_free_coherent(pce_dev->pdev, 2*PAGE_SIZE,
				pce_dev->coh_vmem, pce_dev->coh_pmem);
err_dm_chan_cmd:
	kfree(pce_dev->chan_ce_in_cmd);
	kfree(pce_dev->chan_ce_out_cmd);
	if (pce_dev->iobase)
		iounmap(pce_dev->iobase);

err_pce_dev:

	kfree(pce_dev);

	return NULL;
}
EXPORT_SYMBOL(qce_open);

/* crypto engine close function. */
int qce_close(void *handle)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;

	if (handle == NULL)
		return -ENODEV;
	if (pce_dev->iobase)
		iounmap(pce_dev->iobase);

	if (pce_dev->coh_vmem)
		dma_free_coherent(pce_dev->pdev, 2*PAGE_SIZE,
				pce_dev->coh_vmem, pce_dev->coh_pmem);
	clk_disable(pce_dev->ce_clk);
	clk_disable(pce_dev->ce_core_clk);

	clk_put(pce_dev->ce_clk);
	clk_put(pce_dev->ce_core_clk);

	kfree(pce_dev->chan_ce_in_cmd);
	kfree(pce_dev->chan_ce_out_cmd);
	kfree(handle);

	return 0;
}
EXPORT_SYMBOL(qce_close);

int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
{
	if (ce_support == NULL)
		return -EINVAL;

	ce_support->sha1_hmac_20 = false;
	ce_support->sha1_hmac = false;
	ce_support->sha256_hmac = false;
	ce_support->sha_hmac = false;
	ce_support->cmac = true;
	ce_support->aes_key_192 = false;
	ce_support->aes_xts = true;
	ce_support->aes_ccm = true;
	ce_support->ota = false;
	return 0;
}
EXPORT_SYMBOL(qce_hw_support);
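
/*
 * Illustrative sketch (hypothetical QCE_EXAMPLE_SNIPPETS guard, so it
 * is normally compiled out): how a client driver might open an engine,
 * query its capabilities and close it again. The probe function name
 * is an assumption; real clients keep the handle for the lifetime of
 * the device rather than closing it immediately.
 */
#ifdef QCE_EXAMPLE_SNIPPETS
static int example_client_probe(struct platform_device *pdev)
{
	struct ce_hw_support support;
	void *handle;
	int rc;

	handle = qce_open(pdev, &rc);
	if (handle == NULL)
		return rc;
	qce_hw_support(handle, &support);
	if (!support.aes_xts)
		dev_info(&pdev->dev, "AES-XTS not supported\n");
	return qce_close(handle);
}
#endif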

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
MODULE_DESCRIPTION("Crypto Engine driver");
MODULE_VERSION("2.04");