/* Qualcomm Crypto Engine driver.
 *
 * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/qcedev.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <mach/dma.h>
#include <mach/clk.h>

#include "qce.h"
#include "qcryptohw_40.h"

/* ADM definitions */
#define LI_SG_CMD		(1 << 31)	/* last index in the scatter gather cmd */
#define SRC_INDEX_SG_CMD(index)	((index & 0x3fff) << 16)
#define DST_INDEX_SG_CMD(index)	(index & 0x3fff)
#define ADM_DESC_LAST		(1 << 31)

/* Data xfer between DM and CE in blocks of 16 bytes */
#define ADM_CE_BLOCK_SIZE	16

#define ADM_DESC_LENGTH_MASK	0xffff
#define ADM_DESC_LENGTH(x)	(x & ADM_DESC_LENGTH_MASK)

struct dmov_desc {
	uint32_t addr;
	uint32_t len;
};

#define ADM_STATUS_OK		0x80000002

/* Misc definitions */

/* QCE max number of descriptors in a descriptor list */
#define QCE_MAX_NUM_DESC	128

/* QCE buffer size */
#define QCE_BUF_SIZE		(2 * PAGE_SIZE)

Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070063/* State of DM channel */
64enum qce_chan_st_enum {
65 QCE_CHAN_STATE_IDLE = 0,
66 QCE_CHAN_STATE_IN_PROG = 1,
67 QCE_CHAN_STATE_COMP = 2,
68 QCE_CHAN_STATE_LAST
69};
70
/*
 * CE HW device structure.
 * Each engine has an instance of the structure.
 * Each engine can only handle one crypto operation at a time. It is
 * up to the software above to ensure single-threaded operation on an
 * engine.
 */
struct qce_device {
	struct device *pdev;		/* Handle to platform_device structure */
	unsigned char *coh_vmem;	/* Allocated coherent virtual memory */
	dma_addr_t coh_pmem;		/* Allocated coherent physical memory */
	void __iomem *iobase;		/* Virtual io base of CE HW */
	unsigned int phy_iobase;	/* Physical io base of CE HW */
	struct clk *ce_core_clk;	/* Handle to CE core clk */
	struct clk *ce_clk;		/* Handle to CE clk */
	unsigned int crci_in;		/* CRCI for CE DM IN Channel */
	unsigned int crci_out;		/* CRCI for CE DM OUT Channel */
	unsigned int chan_ce_in;	/* ADM channel used for CE input
					 * and auth result if authentication
					 * only operation. */
	unsigned int chan_ce_out;	/* ADM channel used for CE output,
					 * and icv for esp */
	unsigned int *cmd_pointer_list_ce_in;
	dma_addr_t phy_cmd_pointer_list_ce_in;

	unsigned int *cmd_pointer_list_ce_out;
	dma_addr_t phy_cmd_pointer_list_ce_out;

	unsigned char *cmd_list_ce_in;
	dma_addr_t phy_cmd_list_ce_in;

	unsigned char *cmd_list_ce_out;
	dma_addr_t phy_cmd_list_ce_out;

	struct dmov_desc *ce_out_src_desc;
	dma_addr_t phy_ce_out_src_desc;

	struct dmov_desc *ce_out_dst_desc;
	dma_addr_t phy_ce_out_dst_desc;

	struct dmov_desc *ce_in_src_desc;
	dma_addr_t phy_ce_in_src_desc;

	struct dmov_desc *ce_in_dst_desc;
	dma_addr_t phy_ce_in_dst_desc;

	unsigned char *ce_out_ignore;
	dma_addr_t phy_ce_out_ignore;

	unsigned char *ce_pad;
	dma_addr_t phy_ce_pad;

	struct msm_dmov_cmd *chan_ce_in_cmd;
	struct msm_dmov_cmd *chan_ce_out_cmd;

	uint32_t ce_out_ignore_size;

	int ce_out_dst_desc_index;
	int ce_in_dst_desc_index;

	int ce_out_src_desc_index;
	int ce_in_src_desc_index;

	enum qce_chan_st_enum chan_ce_in_state;		/* chan ce_in state */
	enum qce_chan_st_enum chan_ce_out_state;	/* chan ce_out state */

	int chan_ce_in_status;		/* chan ce_in status */
	int chan_ce_out_status;		/* chan ce_out status */

	unsigned char *dig_result;
	dma_addr_t phy_dig_result;

	/* cached aes key */
	uint32_t cipher_key[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)];

	uint32_t cipher_key_size;	/* cached aes key size in bytes */
	qce_comp_func_ptr_t qce_cb;	/* qce callback function pointer */

	int assoc_nents;
	int ivsize;
	int authsize;
	int src_nents;
	int dst_nents;

	void *areq;
	enum qce_cipher_mode_enum mode;

	dma_addr_t phy_iv_in;
};

/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
static uint32_t _std_init_vector_sha1[] = {
	0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
};
/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint32_t _std_init_vector_sha256[] = {
	0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
	0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
};

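/*
 * Pack a byte stream into 32-bit words in network (big-endian) byte
 * order, as the CE key/IV registers expect. A trailing partial word
 * of 1-3 bytes is left-justified into the last word.
 */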
static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned n;

	n = len / sizeof(uint32_t);
	for (; n > 0; n--) {
		*iv = ((*b << 24) & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000) |
				(((*(b+2)) << 8) & 0xff00) |
				(*(b+3) & 0xff);
		b += sizeof(uint32_t);
		iv++;
	}

	n = len % sizeof(uint32_t);
	if (n == 3) {
		*iv = ((*b << 24) & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000) |
				(((*(b+2)) << 8) & 0xff00);
	} else if (n == 2) {
		*iv = ((*b << 24) & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000);
	} else if (n == 1) {
		*iv = ((*b << 24) & 0xff000000);
	}
}

static void _byte_stream_swap_to_net_words(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned i, j;
	unsigned char swap_iv[AES_IV_LENGTH];

	memset(swap_iv, 0, AES_IV_LENGTH);
	for (i = (AES_IV_LENGTH-len), j = len-1; i < AES_IV_LENGTH; i++, j--)
		swap_iv[i] = b[j];
	_byte_stream_to_net_words(iv, swap_iv, AES_IV_LENGTH);
}

static void _net_words_to_byte_stream(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned n = len / sizeof(uint32_t);

	for (; n > 0; n--) {
		*b++ = (unsigned char) ((*iv >> 24) & 0xff);
		*b++ = (unsigned char) ((*iv >> 16) & 0xff);
		*b++ = (unsigned char) ((*iv >> 8) & 0xff);
		*b++ = (unsigned char) (*iv & 0xff);
		iv++;
	}
	n = len % sizeof(uint32_t);
	if (n == 3) {
		*b++ = (unsigned char) ((*iv >> 24) & 0xff);
		*b++ = (unsigned char) ((*iv >> 16) & 0xff);
		*b = (unsigned char) ((*iv >> 8) & 0xff);
	} else if (n == 2) {
		*b++ = (unsigned char) ((*iv >> 24) & 0xff);
		*b = (unsigned char) ((*iv >> 16) & 0xff);
	} else if (n == 1) {
		*b = (unsigned char) ((*iv >> 24) & 0xff);
	}
}

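/* Count the scatterlist entries needed to cover nbytes of data. */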
static int count_sg(struct scatterlist *sg, int nbytes)
{
	int i;

	for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
		nbytes -= sg->length;
	return i;
}

static int dma_map_pmem_sg(struct buf_info *pmem, unsigned entries,
		struct scatterlist *sg)
{
	int i;

	for (i = 0; i < entries; i++) {
		sg->dma_address = (dma_addr_t)pmem->offset;
		sg++;
		pmem++;
	}
	return 0;
}

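/*
 * Read the CE version register and verify that this is a supported
 * core revision (4.0 or 4.2); fail with -EIO on an unknown device.
 */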
static int _probe_ce_engine(struct qce_device *pce_dev)
{
	unsigned int val;
	unsigned int rev;

	val = readl_relaxed(pce_dev->iobase + CRYPTO_VERSION_REG);
	if (((val & 0xfffffff) != 0x0000042) &&
			((val & 0xfffffff) != 0x0000040)) {
		dev_err(pce_dev->pdev,
				"Unknown Qualcomm crypto device at 0x%x 0x%x\n",
				pce_dev->phy_iobase, val);
		return -EIO;
	}
	rev = (val & CRYPTO_CORE_REV_MASK);
	if (rev == 0x42)
		dev_info(pce_dev->pdev,
				"Qualcomm Crypto 4.2 device found at 0x%x\n",
				pce_dev->phy_iobase);
	else if (rev == 0x40)
		dev_info(pce_dev->pdev,
				"Qualcomm Crypto 4.0 device found at 0x%x\n",
				pce_dev->phy_iobase);

	dev_info(pce_dev->pdev,
			"IO base 0x%x, ce_in channel %d, "
			"ce_out channel %d, "
			"crci_in %d, crci_out %d\n",
			(unsigned int) pce_dev->iobase,
			pce_dev->chan_ce_in, pce_dev->chan_ce_out,
			pce_dev->crci_in, pce_dev->crci_out);

	pce_dev->cipher_key_size = 0;

	return 0;
}

static int _init_ce_engine(struct qce_device *pce_dev)
{
	unsigned int val;

	/* Reset ce */
	clk_reset(pce_dev->ce_core_clk, CLK_RESET_ASSERT);
	clk_reset(pce_dev->ce_core_clk, CLK_RESET_DEASSERT);
	/*
	 * Ensure the previous writes to the CLK registers, which toggle
	 * the CLK reset lines, have completed.
	 */
	dsb();
	/* configure ce */
	val = (1 << CRYPTO_MASK_DOUT_INTR) | (1 << CRYPTO_MASK_DIN_INTR) |
			(1 << CRYPTO_MASK_OP_DONE_INTR) |
			(1 << CRYPTO_MASK_ERR_INTR);
	writel_relaxed(val, pce_dev->iobase + CRYPTO_CONFIG_REG);
	/*
	 * Ensure the previous write to the config register has
	 * completed before reading it back.
	 */
	dsb();
	val = readl_relaxed(pce_dev->iobase + CRYPTO_CONFIG_REG);
	if (!val) {
		dev_err(pce_dev->pdev,
				"unknown Qualcomm crypto device at 0x%x\n",
				pce_dev->phy_iobase);
		return -EIO;
	}
	if (_probe_ce_engine(pce_dev) < 0)
		return -EIO;
	return 0;
}

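/*
 * Program the CE authentication registers for a SHA-1/SHA-256 hash,
 * HMAC, or AES-CMAC request and write the GO bit. The data itself is
 * moved separately over the ADM channels.
 */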
static int _ce_setup_hash(struct qce_device *pce_dev, struct qce_sha_req *sreq)
{
	uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
	uint32_t diglen;
	int i;
	uint32_t auth_cfg = 0;
	bool sha1 = false;

	if (sreq->alg == QCE_HASH_AES_CMAC) {
		uint32_t authkey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
				0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
		uint32_t authklen32 = sreq->authklen/(sizeof(uint32_t));

		/* Clear auth_ivn, auth_keyn registers */
		for (i = 0; i < 16; i++) {
			writel_relaxed(0, (pce_dev->iobase +
				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t))));
			writel_relaxed(0, (pce_dev->iobase +
				(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t))));
		}
		/* write auth_bytecnt 0/1/2/3, start with 0 */
		for (i = 0; i < 4; i++)
			writel_relaxed(0, pce_dev->iobase +
						CRYPTO_AUTH_BYTECNT0_REG +
						i * sizeof(uint32_t));

		_byte_stream_to_net_words(authkey32, sreq->authkey,
						sreq->authklen);
		for (i = 0; i < authklen32; i++)
			writel_relaxed(authkey32[i], pce_dev->iobase +
				CRYPTO_AUTH_KEY0_REG + (i * sizeof(uint32_t)));
		/*
		 * write seg_cfg
		 */
		auth_cfg |= (1 << CRYPTO_LAST);
		auth_cfg |= (CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE);
		auth_cfg |= (CRYPTO_AUTH_SIZE_ENUM_16_BYTES <<
							CRYPTO_AUTH_SIZE);
		auth_cfg |= CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG;

		switch (sreq->authklen) {
		case AES128_KEY_SIZE:
			auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES128 <<
						CRYPTO_AUTH_KEY_SIZE);
			break;
		case AES256_KEY_SIZE:
			auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES256 <<
						CRYPTO_AUTH_KEY_SIZE);
			break;
		default:
			break;
		}

		goto go_proc;
	}

	/* if not the last, the size has to be on a block boundary */
	if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
		return -EIO;

	switch (sreq->alg) {
	case QCE_HASH_SHA1:
	case QCE_HASH_SHA1_HMAC:
		diglen = SHA1_DIGEST_SIZE;
		sha1 = true;
		break;
	case QCE_HASH_SHA256:
	case QCE_HASH_SHA256_HMAC:
		diglen = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
			(sreq->alg == QCE_HASH_SHA256_HMAC)) {
		uint32_t hmackey[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
				0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
		uint32_t hmacklen = sreq->authklen/(sizeof(uint32_t));

		_byte_stream_to_net_words(hmackey, sreq->authkey,
						sreq->authklen);
		/* write hmac key */
		for (i = 0; i < hmacklen; i++)
			writel_relaxed(hmackey[i], pce_dev->iobase +
				CRYPTO_AUTH_KEY0_REG + (i * sizeof(uint32_t)));

		auth_cfg |= (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE);
	} else {
		auth_cfg |= (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE);
	}

	/* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */

	if (sreq->first_blk) {
		if (sha1) {
			for (i = 0; i < 5; i++)
				auth32[i] = _std_init_vector_sha1[i];
		} else {
			for (i = 0; i < 8; i++)
				auth32[i] = _std_init_vector_sha256[i];
		}
	} else {
		_byte_stream_to_net_words(auth32, sreq->digest, diglen);
	}

	for (i = 0; i < 5; i++)
		writel_relaxed(auth32[i], (pce_dev->iobase +
			(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t))));

	if ((sreq->alg == QCE_HASH_SHA256) ||
			(sreq->alg == QCE_HASH_SHA256_HMAC)) {
		writel_relaxed(auth32[5], pce_dev->iobase +
						CRYPTO_AUTH_IV5_REG);
		writel_relaxed(auth32[6], pce_dev->iobase +
						CRYPTO_AUTH_IV6_REG);
		writel_relaxed(auth32[7], pce_dev->iobase +
						CRYPTO_AUTH_IV7_REG);
	}

	/* write auth_bytecnt 0/1/2/3 with the running byte count */
	for (i = 0; i < 4; i++)
		writel_relaxed(sreq->auth_data[i], (pce_dev->iobase +
			(CRYPTO_AUTH_BYTECNT0_REG + i * sizeof(uint32_t))));

	/* write seg_cfg */
	if (sha1)
		auth_cfg |= (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE);
	else
		auth_cfg |= (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE);

	if (sreq->last_blk)
		auth_cfg |= 1 << CRYPTO_LAST;

	auth_cfg |= CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG;

go_proc:
	auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);

	/* write seg_cfg */
	writel_relaxed(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);

	/* write seg_size */
	writel_relaxed(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	/* write auth_seg_size */
	writel_relaxed(sreq->size, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);

	/* write auth_seg_start */
	writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
	/*
	 * Ensure the previous writes to the AUTH registers have
	 * completed before accessing a register that is not in
	 * the same 1K range.
	 */
	dsb();

	writel_relaxed(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
	/*
	 * Ensure the previous writes to all the CE registers have
	 * completed before writing to the GO register.
	 */
	dsb();
	/* issue go to crypto */
	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
	/*
	 * Ensure the write to the GO register has completed before
	 * issuing a DMA transfer request.
	 */
	dsb();

	return 0;
}

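/*
 * Program the CE encryption registers (and, for AES-CCM, the
 * authentication registers) for a cipher request and write the GO
 * bit. coffset is the byte offset of the cipher payload within the
 * segment, i.e. the associated data length for AEAD requests.
 */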
static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
		uint32_t totallen_in, uint32_t coffset)
{
	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
			0, 0, 0, 0};
	uint32_t enck_size_in_word = creq->encklen / sizeof(uint32_t);
	int aes_key_chg;
	int i;
	uint32_t encr_cfg = 0;
	uint32_t ivsize = creq->ivsize;

	if (creq->mode == QCE_MODE_XTS)
		_byte_stream_to_net_words(enckey32, creq->enckey,
						creq->encklen/2);
	else
		_byte_stream_to_net_words(enckey32, creq->enckey,
						creq->encklen);

	if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
		uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
		uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
		uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
		uint32_t auth_cfg = 0;

		/* Clear auth_ivn, auth_keyn registers */
		for (i = 0; i < 16; i++) {
			writel_relaxed(0, (pce_dev->iobase +
				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
			writel_relaxed(0, (pce_dev->iobase +
				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
		}
		/* write auth_bytecnt 0/1/2/3, start with 0 */
		for (i = 0; i < 4; i++)
			writel_relaxed(0, pce_dev->iobase +
						CRYPTO_AUTH_BYTECNT0_REG +
						i * sizeof(uint32_t));
		/* write auth key */
		for (i = 0; i < authklen32; i++)
			writel_relaxed(enckey32[i], pce_dev->iobase +
				CRYPTO_AUTH_KEY0_REG + (i*sizeof(uint32_t)));

		/* write nonce */
		_byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
		for (i = 0; i < noncelen32; i++)
			writel_relaxed(nonce32[i], pce_dev->iobase +
						CRYPTO_AUTH_INFO_NONCE0_REG +
						(i*sizeof(uint32_t)));

		auth_cfg |= (noncelen32 << CRYPTO_AUTH_NONCE_NUM_WORDS);
		auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
		auth_cfg |= (1 << CRYPTO_LAST);
		if (creq->dir == QCE_ENCRYPT)
			auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
		else
			auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
		auth_cfg |= (((creq->authsize >> 1) - 2) << CRYPTO_AUTH_SIZE);
		auth_cfg |= (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE);
		if (creq->authklen == AES128_KEY_SIZE)
			auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES128 <<
						CRYPTO_AUTH_KEY_SIZE);
		else if (creq->authklen == AES256_KEY_SIZE)
			auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES256 <<
						CRYPTO_AUTH_KEY_SIZE);
		auth_cfg |= (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG);
		writel_relaxed(auth_cfg, pce_dev->iobase +
						CRYPTO_AUTH_SEG_CFG_REG);
		if (creq->dir == QCE_ENCRYPT)
			writel_relaxed(totallen_in, pce_dev->iobase +
						CRYPTO_AUTH_SEG_SIZE_REG);
		else
			writel_relaxed((totallen_in - creq->authsize),
				pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
		writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
	} else {
		writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
	}
	/*
	 * Ensure the previous writes to the AUTH registers have
	 * completed before accessing a register that is not in
	 * the same 1K range.
	 */
	dsb();

	switch (creq->mode) {
	case QCE_MODE_ECB:
		encr_cfg |= (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
		break;

	case QCE_MODE_CBC:
		encr_cfg |= (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
		break;

	case QCE_MODE_XTS:
		encr_cfg |= (CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
		break;

	case QCE_MODE_CCM:
		encr_cfg |= (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE);
		break;

	case QCE_MODE_CTR:
	default:
		encr_cfg |= (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
		break;
	}
	pce_dev->mode = creq->mode;

	switch (creq->alg) {
	case CIPHER_ALG_DES:
		if (creq->mode != QCE_MODE_ECB) {
			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
			writel_relaxed(enciv32[0], pce_dev->iobase +
						CRYPTO_CNTR0_IV0_REG);
			writel_relaxed(enciv32[1], pce_dev->iobase +
						CRYPTO_CNTR1_IV1_REG);
		}
		writel_relaxed(enckey32[0], pce_dev->iobase +
						CRYPTO_ENCR_KEY0_REG);
		writel_relaxed(enckey32[1], pce_dev->iobase +
						CRYPTO_ENCR_KEY1_REG);
		encr_cfg |= ((CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
				(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
		break;

	case CIPHER_ALG_3DES:
		if (creq->mode != QCE_MODE_ECB) {
			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
			writel_relaxed(enciv32[0], pce_dev->iobase +
						CRYPTO_CNTR0_IV0_REG);
			writel_relaxed(enciv32[1], pce_dev->iobase +
						CRYPTO_CNTR1_IV1_REG);
		}
		/* write all six words of the 3DES key */
		for (i = 0; i < 6; i++)
			writel_relaxed(enckey32[i], (pce_dev->iobase +
				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t))));

		encr_cfg |= ((CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
				(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
		break;

	case CIPHER_ALG_AES:
	default:
		if (creq->mode == QCE_MODE_XTS) {
			uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
					= {0, 0, 0, 0, 0, 0, 0, 0};
			uint32_t xtsklen =
					creq->encklen/(2 * sizeof(uint32_t));

			_byte_stream_to_net_words(xtskey32, (creq->enckey +
					creq->encklen/2), creq->encklen/2);
			for (i = 0; i < xtsklen; i++)
				writel_relaxed(xtskey32[i], pce_dev->iobase +
						CRYPTO_ENCR_XTS_KEY0_REG +
						(i * sizeof(uint32_t)));

			writel_relaxed(creq->cryptlen,
					pce_dev->iobase +
					CRYPTO_ENCR_XTS_DU_SIZE_REG);
		}
		if (creq->mode != QCE_MODE_ECB) {
			if (creq->mode == QCE_MODE_XTS)
				_byte_stream_swap_to_net_words(enciv32,
							creq->iv, ivsize);
			else
				_byte_stream_to_net_words(enciv32, creq->iv,
								ivsize);
			for (i = 0; i <= 3; i++)
				writel_relaxed(enciv32[i], pce_dev->iobase +
						CRYPTO_CNTR0_IV0_REG +
						(i * sizeof(uint32_t)));
		}
		/* set number of counter bits */
		writel_relaxed(0xffffffff, pce_dev->iobase +
						CRYPTO_CNTR_MASK_REG);

		if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
			encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
						CRYPTO_ENCR_KEY_SZ);
			encr_cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
		} else {
			uint32_t key_size;

			if (creq->mode == QCE_MODE_XTS) {
				key_size = creq->encklen/2;
				enck_size_in_word = key_size/sizeof(uint32_t);
			} else {
				key_size = creq->encklen;
			}

			switch (key_size) {
			case AES128_KEY_SIZE:
				encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
							CRYPTO_ENCR_KEY_SZ);
				break;
			case AES256_KEY_SIZE:
			default:
				encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES256 <<
							CRYPTO_ENCR_KEY_SZ);

				/* check for null key. If null, use hw key */
				for (i = 0; i < enck_size_in_word; i++) {
					if (enckey32[i] != 0)
						break;
				}
				if (i == enck_size_in_word)
					encr_cfg |= 1 << CRYPTO_USE_HW_KEY;
				break;
			} /* end of switch (key_size) */

			encr_cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
			if (pce_dev->cipher_key_size != creq->encklen)
				aes_key_chg = 1;
			else {
				for (i = 0; i < enck_size_in_word; i++) {
					if (enckey32[i]
						!= pce_dev->cipher_key[i])
						break;
				}
				aes_key_chg = (i == enck_size_in_word) ? 0 : 1;
			}

			if (aes_key_chg) {
				for (i = 0; i < enck_size_in_word; i++)
					writel_relaxed(enckey32[i],
						pce_dev->iobase +
						CRYPTO_ENCR_KEY0_REG +
						(i * sizeof(uint32_t)));
				pce_dev->cipher_key_size = creq->encklen;
				for (i = 0; i < enck_size_in_word; i++)
					pce_dev->cipher_key[i] = enckey32[i];
			} /* end of if (aes_key_chg) */
		} /* end of else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
		break;
	} /* end of switch (creq->alg) */

	/* set the encode/decode direction */
	encr_cfg |= ((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;

	/* write encr seg cfg */
	writel_relaxed(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);

	/* write encr seg size */
	if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT))
		writel_relaxed((creq->cryptlen + creq->authsize),
			pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
	else
		writel_relaxed(creq->cryptlen,
			pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
	/* write encr seg start */
	writel_relaxed((coffset & 0xffff),
			pce_dev->iobase + CRYPTO_ENCR_SEG_START_REG);
	/* write seg size */
	writel_relaxed(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
	/*
	 * Ensure the previous writes to all the CE registers have
	 * completed before writing to the GO register.
	 */
	dsb();
	/* issue go to crypto */
	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
	/*
	 * Ensure the write to the GO register has completed before
	 * issuing a DMA transfer request.
	 */
	dsb();
	return 0;
}

static int _aead_complete(struct qce_device *pce_dev)
{
	struct aead_request *areq;
	int i;
	uint32_t ivsize;
	uint32_t iv_out[4];
	unsigned char iv[4 * sizeof(uint32_t)];

	areq = (struct aead_request *) pce_dev->areq;
	ivsize = pce_dev->ivsize;

	if (areq->src != areq->dst) {
		dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
				DMA_FROM_DEVICE);
	}
	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
							DMA_TO_DEVICE);

	if (pce_dev->mode != QCE_MODE_CCM)
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
				ivsize, DMA_TO_DEVICE);
	dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
			DMA_TO_DEVICE);

	/* get iv out */
	if ((pce_dev->mode == QCE_MODE_ECB) ||
			(pce_dev->mode == QCE_MODE_CCM)) {
		if (pce_dev->mode == QCE_MODE_CCM) {
			int result;

			result = readl_relaxed(pce_dev->iobase +
						CRYPTO_STATUS_REG);
			result &= (1 << CRYPTO_MAC_FAILED);
			result |= (pce_dev->chan_ce_in_status |
						pce_dev->chan_ce_out_status);
			dsb();
			pce_dev->qce_cb(areq, pce_dev->dig_result, NULL,
								result);
		} else {
			pce_dev->qce_cb(areq, pce_dev->dig_result, NULL,
						pce_dev->chan_ce_in_status |
						pce_dev->chan_ce_out_status);
		}
	} else {
		for (i = 0; i < 4; i++)
			iv_out[i] = readl_relaxed(pce_dev->iobase +
				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)));

		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
		pce_dev->qce_cb(areq, pce_dev->dig_result, iv,
						pce_dev->chan_ce_in_status |
						pce_dev->chan_ce_out_status);
	}
	return 0;
}

static void _sha_complete(struct qce_device *pce_dev)
{
	struct ahash_request *areq;
	uint32_t auth_data[4];
	uint32_t digest[8];
	int i;

	areq = (struct ahash_request *) pce_dev->areq;
	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
				DMA_TO_DEVICE);

	for (i = 0; i < 4; i++)
		auth_data[i] = readl_relaxed(pce_dev->iobase +
					(CRYPTO_AUTH_BYTECNT0_REG +
						i * sizeof(uint32_t)));

	for (i = 0; i < 8; i++)
		digest[i] = readl_relaxed(pce_dev->iobase +
			CRYPTO_AUTH_IV0_REG + (i * sizeof(uint32_t)));

	_net_words_to_byte_stream(digest, pce_dev->dig_result,
						SHA256_DIGEST_SIZE);

	pce_dev->qce_cb(areq, pce_dev->dig_result, (unsigned char *)auth_data,
				pce_dev->chan_ce_in_status);
}

static int _ablk_cipher_complete(struct qce_device *pce_dev)
{
	struct ablkcipher_request *areq;
	uint32_t iv_out[4];
	unsigned char iv[4 * sizeof(uint32_t)];

	areq = (struct ablkcipher_request *) pce_dev->areq;

	if (areq->src != areq->dst) {
		dma_unmap_sg(pce_dev->pdev, areq->dst,
			pce_dev->dst_nents, DMA_FROM_DEVICE);
	}
	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
		(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
						DMA_TO_DEVICE);
	/* get iv out */
	if (pce_dev->mode == QCE_MODE_ECB) {
		pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status |
					pce_dev->chan_ce_out_status);
	} else {
		int i;

		for (i = 0; i < 4; i++)
			iv_out[i] = readl_relaxed(pce_dev->iobase +
				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)));

		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
		pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status |
					pce_dev->chan_ce_out_status);
	}

	return 0;
}

static int _ablk_cipher_use_pmem_complete(struct qce_device *pce_dev)
{
	struct ablkcipher_request *areq;
	uint32_t iv_out[4];
	unsigned char iv[4 * sizeof(uint32_t)];

	areq = (struct ablkcipher_request *) pce_dev->areq;

	/* get iv out */
	if (pce_dev->mode == QCE_MODE_ECB) {
		pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status |
					pce_dev->chan_ce_out_status);
	} else {
		int i;

		for (i = 0; i < 4; i++)
			iv_out[i] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR0_IV0_REG + (i * sizeof(uint32_t)));

		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
		pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status |
					pce_dev->chan_ce_out_status);
	}

	return 0;
}

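/*
 * A single ADM descriptor can carry at most 0x8000 bytes. Split a
 * larger transfer across consecutive descriptors, bumping *index for
 * each extra descriptor consumed; a paddr of 0 leaves the descriptor
 * addresses (e.g. the fixed CE shadow register) untouched.
 */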
static int qce_split_and_insert_dm_desc(struct dmov_desc *pdesc,
		unsigned int plen, unsigned int paddr, int *index)
{
	while (plen > 0x8000) {
		pdesc->len = 0x8000;
		if (paddr > 0) {
			pdesc->addr = paddr;
			paddr += 0x8000;
		}
		plen -= pdesc->len;
		if (plen > 0) {
			*index = (*index) + 1;
			if ((*index) >= QCE_MAX_NUM_DESC)
				return -ENOMEM;
			pdesc++;
		}
	}
	if ((plen > 0) && (plen <= 0x8000)) {
		pdesc->len = plen;
		if (paddr > 0)
			pdesc->addr = paddr;
	}

	return 0;
}

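/*
 * Chain a DMA-mapped scatterlist into the ce_in channel descriptor
 * lists: the destination (CE shadow) descriptor is sized to nbytes,
 * and each scatterlist entry is appended to the source list, merging
 * physically contiguous entries into a single descriptor.
 */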
static int _chain_sg_buffer_in(struct qce_device *pce_dev,
		struct scatterlist *sg, unsigned int nbytes)
{
	unsigned int len;
	unsigned int dlen;
	struct dmov_desc *pdesc;

	pdesc = pce_dev->ce_in_dst_desc + pce_dev->ce_in_dst_desc_index;
	if (nbytes > 0x8000)
		qce_split_and_insert_dm_desc(pdesc, nbytes, 0,
				&pce_dev->ce_in_dst_desc_index);
	else
		pdesc->len = nbytes;

	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
	/*
	 * Two consecutive chunks may be handled by the same
	 * buffer descriptor.
	 */
	while (nbytes > 0) {
		len = min(nbytes, sg_dma_len(sg));
		dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
		nbytes -= len;
		if (dlen == 0) {
			pdesc->addr = sg_dma_address(sg);
			pdesc->len = len;
			if (pdesc->len > 0x8000)
				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
						sg_dma_address(sg),
						&pce_dev->ce_in_src_desc_index);
		} else if (sg_dma_address(sg) == (pdesc->addr + dlen)) {
			pdesc->len = dlen + len;
			if (pdesc->len > 0x8000)
				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
						pdesc->addr,
						&pce_dev->ce_in_src_desc_index);
		} else {
			pce_dev->ce_in_src_desc_index++;
			if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
				return -ENOMEM;
			pdesc++;
			pdesc->len = len;
			pdesc->addr = sg_dma_address(sg);
			if (pdesc->len > 0x8000)
				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
						sg_dma_address(sg),
						&pce_dev->ce_in_src_desc_index);
		}
		if (nbytes > 0)
			sg = sg_next(sg);
	}
	return 0;
}

static int _chain_pm_buffer_in(struct qce_device *pce_dev,
		unsigned int pmem, unsigned int nbytes)
{
	unsigned int dlen;
	struct dmov_desc *pdesc;

	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
	dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
	if (dlen == 0) {
		pdesc->addr = pmem;
		pdesc->len = nbytes;
	} else if (pmem == (pdesc->addr + dlen)) {
		pdesc->len = dlen + nbytes;
	} else {
		pce_dev->ce_in_src_desc_index++;
		if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
			return -ENOMEM;
		pdesc++;
		pdesc->len = nbytes;
		pdesc->addr = pmem;
	}
	pdesc = pce_dev->ce_in_dst_desc + pce_dev->ce_in_dst_desc_index;
	pdesc->len += nbytes;

	return 0;
}

static void _chain_buffer_in_init(struct qce_device *pce_dev)
{
	struct dmov_desc *pdesc;

	pce_dev->ce_in_src_desc_index = 0;
	pce_dev->ce_in_dst_desc_index = 0;
	pdesc = pce_dev->ce_in_src_desc;
	pdesc->len = 0;
}

static void _ce_in_final(struct qce_device *pce_dev, unsigned total)
{
	struct dmov_desc *pdesc;
	dmov_sg *pcmd;

	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
	pdesc->len |= ADM_DESC_LAST;
	pdesc = pce_dev->ce_in_dst_desc + pce_dev->ce_in_dst_desc_index;
	pdesc->len |= ADM_DESC_LAST;

	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in;
	pcmd->cmd |= CMD_LC;
}

#ifdef QCE_DEBUG
static void _ce_in_dump(struct qce_device *pce_dev)
{
	int i;
	struct dmov_desc *pdesc;

	dev_info(pce_dev->pdev, "_ce_in_dump: src\n");
	for (i = 0; i <= pce_dev->ce_in_src_desc_index; i++) {
		pdesc = pce_dev->ce_in_src_desc + i;
		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
				pdesc->len);
	}
	dev_info(pce_dev->pdev, "_ce_in_dump: dst\n");
	for (i = 0; i <= pce_dev->ce_in_dst_desc_index; i++) {
		pdesc = pce_dev->ce_in_dst_desc + i;
		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
				pdesc->len);
	}
}

static void _ce_out_dump(struct qce_device *pce_dev)
{
	int i;
	struct dmov_desc *pdesc;

	dev_info(pce_dev->pdev, "_ce_out_dump: src\n");
	for (i = 0; i <= pce_dev->ce_out_src_desc_index; i++) {
		pdesc = pce_dev->ce_out_src_desc + i;
		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
				pdesc->len);
	}

	dev_info(pce_dev->pdev, "_ce_out_dump: dst\n");
	for (i = 0; i <= pce_dev->ce_out_dst_desc_index; i++) {
		pdesc = pce_dev->ce_out_dst_desc + i;
		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
				pdesc->len);
	}
}

#else

static void _ce_in_dump(struct qce_device *pce_dev)
{
}

static void _ce_out_dump(struct qce_device *pce_dev)
{
}

#endif

static int _chain_sg_buffer_out(struct qce_device *pce_dev,
		struct scatterlist *sg, unsigned int nbytes)
{
	unsigned int len;
	unsigned int dlen;
	struct dmov_desc *pdesc;

	pdesc = pce_dev->ce_out_src_desc + pce_dev->ce_out_src_desc_index;
	if (nbytes > 0x8000)
		qce_split_and_insert_dm_desc(pdesc, nbytes, 0,
				&pce_dev->ce_out_src_desc_index);
	else
		pdesc->len = nbytes;

	pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
	/*
	 * Two consecutive chunks may be handled by the same
	 * buffer descriptor.
	 */
	while (nbytes > 0) {
		len = min(nbytes, sg_dma_len(sg));
		dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
		nbytes -= len;
		if (dlen == 0) {
			pdesc->addr = sg_dma_address(sg);
			pdesc->len = len;
			if (pdesc->len > 0x8000)
				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
						sg_dma_address(sg),
						&pce_dev->ce_out_dst_desc_index);
		} else if (sg_dma_address(sg) == (pdesc->addr + dlen)) {
			pdesc->len = dlen + len;
			if (pdesc->len > 0x8000)
				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
						pdesc->addr,
						&pce_dev->ce_out_dst_desc_index);
		} else {
			pce_dev->ce_out_dst_desc_index++;
			if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
				return -EIO;
			pdesc++;
			pdesc->len = len;
			pdesc->addr = sg_dma_address(sg);
			if (pdesc->len > 0x8000)
				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
						sg_dma_address(sg),
						&pce_dev->ce_out_dst_desc_index);
		}
		if (nbytes > 0)
			sg = sg_next(sg);
	}
	return 0;
}

static int _chain_pm_buffer_out(struct qce_device *pce_dev,
		unsigned int pmem, unsigned int nbytes)
{
	unsigned int dlen;
	struct dmov_desc *pdesc;

	pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
	dlen = pdesc->len & ADM_DESC_LENGTH_MASK;

	if (dlen == 0) {
		pdesc->addr = pmem;
		pdesc->len = nbytes;
	} else if (pmem == (pdesc->addr + dlen)) {
		pdesc->len = dlen + nbytes;
	} else {
		pce_dev->ce_out_dst_desc_index++;
		if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
			return -EIO;
		pdesc++;
		pdesc->len = nbytes;
		pdesc->addr = pmem;
	}
	pdesc = pce_dev->ce_out_src_desc + pce_dev->ce_out_src_desc_index;
	pdesc->len += nbytes;

	return 0;
}

static void _chain_buffer_out_init(struct qce_device *pce_dev)
{
	struct dmov_desc *pdesc;

	pce_dev->ce_out_dst_desc_index = 0;
	pce_dev->ce_out_src_desc_index = 0;
	pdesc = pce_dev->ce_out_dst_desc;
	pdesc->len = 0;
}

static void _ce_out_final(struct qce_device *pce_dev, unsigned total)
{
	struct dmov_desc *pdesc;
	dmov_sg *pcmd;

	pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
	pdesc->len |= ADM_DESC_LAST;
	pdesc = pce_dev->ce_out_src_desc + pce_dev->ce_out_src_desc_index;
	pdesc->len |= ADM_DESC_LAST;
	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out;
	pcmd->cmd |= CMD_LC;
}

static void _aead_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
							result);
		pce_dev->chan_ce_in_status = -1;
	} else {
		pce_dev->chan_ce_in_status = 0;
	}

	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_aead_complete(pce_dev);
	}
}

static void _aead_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
							result);
		pce_dev->chan_ce_out_status = -1;
	} else {
		pce_dev->chan_ce_out_status = 0;
	}

	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_aead_complete(pce_dev);
	}
}

static void _sha_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
							result);
		pce_dev->chan_ce_in_status = -1;
	} else {
		pce_dev->chan_ce_in_status = 0;
	}
	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
	_sha_complete(pce_dev);
}

static void _ablk_cipher_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
							result);
		pce_dev->chan_ce_in_status = -1;
	} else {
		pce_dev->chan_ce_in_status = 0;
	}

	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_ablk_cipher_complete(pce_dev);
	}
}

static void _ablk_cipher_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
							result);
		pce_dev->chan_ce_out_status = -1;
	} else {
		pce_dev->chan_ce_out_status = 0;
	}

	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_ablk_cipher_complete(pce_dev);
	}
}

static void _ablk_cipher_ce_in_call_back_pmem(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
							result);
		pce_dev->chan_ce_in_status = -1;
	} else {
		pce_dev->chan_ce_in_status = 0;
	}

	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_ablk_cipher_use_pmem_complete(pce_dev);
	}
}

static void _ablk_cipher_ce_out_call_back_pmem(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
							result);
		pce_dev->chan_ce_out_status = -1;
	} else {
		pce_dev->chan_ce_out_status = 0;
	}

	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_ablk_cipher_use_pmem_complete(pce_dev);
	}
}

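/*
 * Carve the coherent buffer allocated in qce_open() into the fixed
 * data mover structures (descriptor arrays, scatter-gather command
 * lists, command pointer lists, pad and throw-away areas) and
 * pre-build the ADM commands shared by all requests.
 */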
static int _setup_cmd_template(struct qce_device *pce_dev)
{
	dmov_sg *pcmd;
	struct dmov_desc *pdesc;
	unsigned char *vaddr;
	int i = 0;

	/* Divide up the coherent memory (QCE_BUF_SIZE bytes) */

	/* 1. ce_in channel 1st command src descriptors, 128 entries */
	vaddr = pce_dev->coh_vmem;
	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
	pce_dev->ce_in_src_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_in_src_desc = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);

	/* 2. ce_in channel 1st command dst descriptors, 128 entries */
	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
	pce_dev->ce_in_dst_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_in_dst_desc = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);

	/* 3. ce_in channel command list of one scatter gather command */
	pce_dev->cmd_list_ce_in = vaddr;
	pce_dev->phy_cmd_list_ce_in = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(dmov_sg);

	/* 4. authentication result. */
	pce_dev->dig_result = vaddr;
	pce_dev->phy_dig_result = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + SHA256_DIGESTSIZE;

	/* 5. ce_out channel command list of one scatter gather command */
	pce_dev->cmd_list_ce_out = vaddr;
	pce_dev->phy_cmd_list_ce_out = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(dmov_sg);

	/* 6. ce_out channel command src descriptors, 128 entries */
	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
	pce_dev->ce_out_src_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_out_src_desc = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);

	/* 7. ce_out channel command dst descriptors, 128 entries. */
	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
	pce_dev->ce_out_dst_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_out_dst_desc = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);

	/* 8. pad area. */
	pce_dev->ce_pad = vaddr;
	pce_dev->phy_ce_pad = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);

	/* Padding length is doubled for the worst case of AES-CCM */
	vaddr = vaddr + 2 * ADM_CE_BLOCK_SIZE;

	/* 9. ce_in channel command pointer list. */
	vaddr = (unsigned char *) ALIGN(((unsigned int) vaddr), 8);
	pce_dev->cmd_pointer_list_ce_in = (unsigned int *) vaddr;
	pce_dev->phy_cmd_pointer_list_ce_in = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(unsigned char *);

	/* 10. ce_out channel command pointer list. */
	vaddr = (unsigned char *) ALIGN(((unsigned int) vaddr), 8);
	pce_dev->cmd_pointer_list_ce_out = (unsigned int *) vaddr;
	pce_dev->phy_cmd_pointer_list_ce_out = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(unsigned char *);

	/* 11. throw-away area to store by-pass data from ce_out. */
	pce_dev->ce_out_ignore = (unsigned char *) vaddr;
	pce_dev->phy_ce_out_ignore = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	pce_dev->ce_out_ignore_size = QCE_BUF_SIZE - (vaddr -
			pce_dev->coh_vmem);	/* at least 1.5 K of space */
	/*
	 * The first command of command list ce_in is for the input of
	 * concurrent operation of encrypt/decrypt or for the input
	 * of authentication.
	 */
	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in;
	/* swap byte and half word, dst crci, scatter gather */
	pcmd->cmd = CMD_DST_SWAP_BYTES | CMD_DST_SWAP_SHORTS |
			CMD_DST_CRCI(pce_dev->crci_in) | CMD_MODE_SG;
	pdesc = pce_dev->ce_in_src_desc;
	pdesc->addr = 0;	/* to be filled in each operation */
	pdesc->len = 0;		/* to be filled in each operation */
	pcmd->src_dscr = (unsigned) pce_dev->phy_ce_in_src_desc;

	pdesc = pce_dev->ce_in_dst_desc;
	for (i = 0; i < QCE_MAX_NUM_DESC; i++) {
		pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase);
		pdesc->len = 0;	/* to be filled in each operation */
		pdesc++;
	}
	pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_in_dst_desc;
	pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) |
						DST_INDEX_SG_CMD(0);
	pcmd++;

	/* setup command pointer list */
	*(pce_dev->cmd_pointer_list_ce_in) = (CMD_PTR_LP | DMOV_CMD_LIST |
			DMOV_CMD_ADDR((unsigned int)
					pce_dev->phy_cmd_list_ce_in));
	pce_dev->chan_ce_in_cmd->user = (void *) pce_dev;
	pce_dev->chan_ce_in_cmd->exec_func = NULL;
	pce_dev->chan_ce_in_cmd->cmdptr = DMOV_CMD_ADDR(
			(unsigned int) pce_dev->phy_cmd_pointer_list_ce_in);
	pce_dev->chan_ce_in_cmd->crci_mask = msm_dmov_build_crci_mask(1,
			pce_dev->crci_in);

	/*
	 * The first command in the command list ce_out.
	 * It is for encrypt/decrypt output.
	 * If hashing only, ce_out is not used.
	 */
	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out;
	/* swap byte, half word, source crci, scatter gather */
	pcmd->cmd = CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
			CMD_SRC_CRCI(pce_dev->crci_out) | CMD_MODE_SG;

	pdesc = pce_dev->ce_out_src_desc;
	for (i = 0; i < QCE_MAX_NUM_DESC; i++) {
		pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase);
		pdesc->len = 0;	/* to be filled in each operation */
		pdesc++;
	}
	pcmd->src_dscr = (unsigned) pce_dev->phy_ce_out_src_desc;

	pdesc = pce_dev->ce_out_dst_desc;
	pdesc->addr = 0;	/* to be filled in each operation */
	pdesc->len = 0;		/* to be filled in each operation */
	pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_out_dst_desc;
	pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) |
						DST_INDEX_SG_CMD(0);

	pcmd++;

	/* setup command pointer list */
	*(pce_dev->cmd_pointer_list_ce_out) = (CMD_PTR_LP | DMOV_CMD_LIST |
			DMOV_CMD_ADDR((unsigned int)pce_dev->
						phy_cmd_list_ce_out));

	pce_dev->chan_ce_out_cmd->user = pce_dev;
	pce_dev->chan_ce_out_cmd->exec_func = NULL;
	pce_dev->chan_ce_out_cmd->cmdptr = DMOV_CMD_ADDR(
			(unsigned int) pce_dev->phy_cmd_pointer_list_ce_out);
	pce_dev->chan_ce_out_cmd->crci_mask = msm_dmov_build_crci_mask(1,
			pce_dev->crci_out);

	return 0;
}

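/*
 * Mark the requested channels in progress (and an unused channel as
 * already complete) and queue the pre-built commands to the data
 * mover; results are delivered through the per-request callbacks.
 */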
static int _qce_start_dma(struct qce_device *pce_dev, bool ce_in, bool ce_out)
{
	if (ce_in)
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IN_PROG;
	else
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;

	if (ce_out)
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IN_PROG;
	else
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;

	if (ce_in)
		msm_dmov_enqueue_cmd(pce_dev->chan_ce_in,
					pce_dev->chan_ce_in_cmd);
	if (ce_out)
		msm_dmov_enqueue_cmd(pce_dev->chan_ce_out,
					pce_dev->chan_ce_out_cmd);

	return 0;
}

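/*
 * Queue an AEAD request: associated data, cipher payload, and
 * alignment padding are chained on ce_in; the associated data is
 * steered into the throw-away area on ce_out, followed by the cipher
 * output and padding. Both DMA channels are then started.
 */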
int qce_aead_req(void *handle, struct qce_req *q_req)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	struct aead_request *areq = (struct aead_request *) q_req->areq;
	uint32_t authsize = q_req->authsize;
	uint32_t totallen_in, totallen_out, out_len;
	uint32_t pad_len_in, pad_len_out;
	uint32_t pad_mac_len_out, pad_ptx_len_out;
	int rc = 0;

	if (q_req->dir == QCE_ENCRYPT) {
		q_req->cryptlen = areq->cryptlen;
		totallen_in = q_req->cryptlen + areq->assoclen;
		totallen_out = q_req->cryptlen + authsize + areq->assoclen;
		out_len = areq->cryptlen + authsize;
		pad_len_in = ALIGN(totallen_in, ADM_CE_BLOCK_SIZE) -
								totallen_in;
		pad_mac_len_out = ALIGN(authsize, ADM_CE_BLOCK_SIZE) -
								authsize;
		pad_ptx_len_out = ALIGN(q_req->cryptlen, ADM_CE_BLOCK_SIZE) -
								q_req->cryptlen;
		pad_len_out = pad_ptx_len_out + pad_mac_len_out;
		totallen_out += pad_len_out;
	} else {
		q_req->cryptlen = areq->cryptlen - authsize;
		totallen_in = areq->cryptlen + areq->assoclen;
		totallen_out = q_req->cryptlen + areq->assoclen;
		out_len = areq->cryptlen - authsize;
		pad_len_in = ALIGN(areq->cryptlen, ADM_CE_BLOCK_SIZE) -
								areq->cryptlen;
		pad_len_out = pad_len_in + authsize;
		totallen_out += pad_len_out;
	}

	_chain_buffer_in_init(pce_dev);
	_chain_buffer_out_init(pce_dev);

	pce_dev->assoc_nents = 0;
	pce_dev->src_nents = 0;
	pce_dev->dst_nents = 0;
	pce_dev->ivsize = q_req->ivsize;
	pce_dev->authsize = q_req->authsize;

	/* associated data input */
	pce_dev->assoc_nents = count_sg(areq->assoc, areq->assoclen);
	dma_map_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
					DMA_TO_DEVICE);
	if (_chain_sg_buffer_in(pce_dev, areq->assoc, areq->assoclen) < 0) {
		rc = -ENOMEM;
		goto bad;
	}
	/* cipher input */
	pce_dev->src_nents = count_sg(areq->src, areq->cryptlen);
	dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
							DMA_TO_DEVICE);
	if (_chain_sg_buffer_in(pce_dev, areq->src, areq->cryptlen) < 0) {
		rc = -ENOMEM;
		goto bad;
	}
	/* pad data in */
	if (pad_len_in) {
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
						pad_len_in) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* ignore associated data */
	if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_out_ignore,
					areq->assoclen) < 0) {
		rc = -ENOMEM;
		goto bad;
	}
	/* cipher + mac output for encryption */
	if (areq->src != areq->dst) {
		pce_dev->dst_nents = count_sg(areq->dst, out_len);
		dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
				DMA_FROM_DEVICE);
	}
	if (_chain_sg_buffer_out(pce_dev, areq->dst, out_len) < 0) {
		rc = -ENOMEM;
		goto bad;
	}
	/* pad data out */
	if (pad_len_out) {
		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
						pad_len_out) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* finalize the ce_in and ce_out channels command lists */
	_ce_in_final(pce_dev, ALIGN(totallen_in, ADM_CE_BLOCK_SIZE));
	_ce_out_final(pce_dev, ALIGN(totallen_out, ADM_CE_BLOCK_SIZE));

	/* set up crypto device */
	rc = _ce_setup_cipher(pce_dev, q_req, totallen_in, areq->assoclen);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = q_req->areq;
	pce_dev->qce_cb = q_req->qce_cb;

	pce_dev->chan_ce_in_cmd->complete_func = _aead_ce_in_call_back;
	pce_dev->chan_ce_out_cmd->complete_func = _aead_ce_out_call_back;

	_ce_in_dump(pce_dev);
	_ce_out_dump(pce_dev);

	rc = _qce_start_dma(pce_dev, true, true);
	if (rc == 0)
		return 0;
bad:
	if (pce_dev->assoc_nents) {
		dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
				DMA_TO_DEVICE);
	}

	if (pce_dev->src_nents) {
		dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
				(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
								DMA_TO_DEVICE);
	}
	if (pce_dev->dst_nents) {
		dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
				DMA_FROM_DEVICE);
	}
	return rc;
}
EXPORT_SYMBOL(qce_aead_req);

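/*
 * Queue an ablkcipher request. Source and destination may be regular
 * scatterlists or, when use_pmem is set, pre-pinned physical memory
 * whose offsets are used as DMA addresses directly.
 */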
int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
{
	int rc = 0;
	struct qce_device *pce_dev = (struct qce_device *) handle;
	struct ablkcipher_request *areq = (struct ablkcipher_request *)
						c_req->areq;
	uint32_t pad_len = ALIGN(areq->nbytes, ADM_CE_BLOCK_SIZE)
						- areq->nbytes;

	_chain_buffer_in_init(pce_dev);
	_chain_buffer_out_init(pce_dev);

	pce_dev->src_nents = 0;
	pce_dev->dst_nents = 0;

	/* cipher input */
	pce_dev->src_nents = count_sg(areq->src, areq->nbytes);

	if (c_req->use_pmem != 1)
		dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
								DMA_TO_DEVICE);
	else
		dma_map_pmem_sg(&c_req->pmem->src[0], pce_dev->src_nents,
								areq->src);

	if (_chain_sg_buffer_in(pce_dev, areq->src, areq->nbytes) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* cipher output */
	if (areq->src != areq->dst) {
		pce_dev->dst_nents = count_sg(areq->dst, areq->nbytes);
		if (c_req->use_pmem != 1)
			dma_map_sg(pce_dev->pdev, areq->dst,
					pce_dev->dst_nents, DMA_FROM_DEVICE);
		else
			dma_map_pmem_sg(&c_req->pmem->dst[0],
					pce_dev->dst_nents, areq->dst);
	}
	if (_chain_sg_buffer_out(pce_dev, areq->dst, areq->nbytes) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* pad data */
	if (pad_len) {
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
						pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
						pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* finalize the ce_in and ce_out channels command lists */
	_ce_in_final(pce_dev, areq->nbytes + pad_len);
	_ce_out_final(pce_dev, areq->nbytes + pad_len);

	_ce_in_dump(pce_dev);
	_ce_out_dump(pce_dev);

	/* set up crypto device */
	rc = _ce_setup_cipher(pce_dev, c_req, areq->nbytes, 0);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = areq;
	pce_dev->qce_cb = c_req->qce_cb;
	if (c_req->use_pmem == 1) {
		pce_dev->chan_ce_in_cmd->complete_func =
					_ablk_cipher_ce_in_call_back_pmem;
		pce_dev->chan_ce_out_cmd->complete_func =
					_ablk_cipher_ce_out_call_back_pmem;
	} else {
		pce_dev->chan_ce_in_cmd->complete_func =
					_ablk_cipher_ce_in_call_back;
		pce_dev->chan_ce_out_cmd->complete_func =
					_ablk_cipher_ce_out_call_back;
	}
	rc = _qce_start_dma(pce_dev, true, true);

	if (rc == 0)
		return 0;
bad:
	if (c_req->use_pmem != 1) {
		if (pce_dev->dst_nents) {
			dma_unmap_sg(pce_dev->pdev, areq->dst,
				pce_dev->dst_nents, DMA_FROM_DEVICE);
		}
		if (pce_dev->src_nents) {
			dma_unmap_sg(pce_dev->pdev, areq->src,
					pce_dev->src_nents,
					(areq->src == areq->dst) ?
						DMA_BIDIRECTIONAL :
						DMA_TO_DEVICE);
		}
	}
	return rc;
}
EXPORT_SYMBOL(qce_ablk_cipher_req);

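/*
 * Queue a hash request: only the ce_in channel is used; the digest
 * and byte counts are read back from the CE registers in
 * _sha_complete().
 */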
int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	int rc;
	uint32_t pad_len = ALIGN(sreq->size, ADM_CE_BLOCK_SIZE) - sreq->size;
	struct ahash_request *areq = (struct ahash_request *)sreq->areq;

	_chain_buffer_in_init(pce_dev);
	pce_dev->src_nents = count_sg(sreq->src, sreq->size);
	dma_map_sg(pce_dev->pdev, sreq->src, pce_dev->src_nents,
							DMA_TO_DEVICE);

	if (_chain_sg_buffer_in(pce_dev, sreq->src, sreq->size) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	if (pad_len) {
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
						pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}
	_ce_in_final(pce_dev, sreq->size + pad_len);

	_ce_in_dump(pce_dev);

	rc = _ce_setup_hash(pce_dev, sreq);

	if (rc < 0)
		goto bad;

	pce_dev->areq = areq;
	pce_dev->qce_cb = sreq->qce_cb;
	pce_dev->chan_ce_in_cmd->complete_func = _sha_ce_in_call_back;

	rc = _qce_start_dma(pce_dev, true, false);

	if (rc == 0)
		return 0;
bad:
	if (pce_dev->src_nents) {
		dma_unmap_sg(pce_dev->pdev, sreq->src,
				pce_dev->src_nents, DMA_TO_DEVICE);
	}

	return rc;
}
EXPORT_SYMBOL(qce_process_sha_req);

/* crypto engine open function. */
void *qce_open(struct platform_device *pdev, int *rc)
{
	struct qce_device *pce_dev;
	struct resource *resource;
	struct clk *ce_core_clk;
	struct clk *ce_clk;

	pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
	if (!pce_dev) {
		*rc = -ENOMEM;
		dev_err(&pdev->dev, "Can not allocate memory\n");
		return NULL;
	}
	pce_dev->pdev = &pdev->dev;

	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!resource) {
		*rc = -ENXIO;
		dev_err(pce_dev->pdev, "Missing MEM resource\n");
		goto err_pce_dev;
	}
	pce_dev->phy_iobase = resource->start;
	pce_dev->iobase = ioremap_nocache(resource->start,
				resource->end - resource->start + 1);
	if (!pce_dev->iobase) {
		*rc = -ENOMEM;
		dev_err(pce_dev->pdev, "Can not map io memory\n");
		goto err_pce_dev;
	}

	pce_dev->chan_ce_in_cmd = kzalloc(sizeof(struct msm_dmov_cmd),
			GFP_KERNEL);
	pce_dev->chan_ce_out_cmd = kzalloc(sizeof(struct msm_dmov_cmd),
			GFP_KERNEL);
	if (pce_dev->chan_ce_in_cmd == NULL ||
			pce_dev->chan_ce_out_cmd == NULL) {
		dev_err(pce_dev->pdev, "Can not allocate memory\n");
		*rc = -ENOMEM;
		goto err_dm_chan_cmd;
	}

	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
					"crypto_channels");
	if (!resource) {
		*rc = -ENXIO;
		dev_err(pce_dev->pdev, "Missing DMA channel resource\n");
		goto err_dm_chan_cmd;
	}
	pce_dev->chan_ce_in = resource->start;
	pce_dev->chan_ce_out = resource->end;
	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
				"crypto_crci_in");
	if (!resource) {
		*rc = -ENXIO;
		dev_err(pce_dev->pdev, "Missing DMA crci in resource\n");
		goto err_dm_chan_cmd;
	}
	pce_dev->crci_in = resource->start;
	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
				"crypto_crci_out");
	if (!resource) {
		*rc = -ENXIO;
		dev_err(pce_dev->pdev, "Missing DMA crci out resource\n");
		goto err_dm_chan_cmd;
	}
	pce_dev->crci_out = resource->start;

	pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
			QCE_BUF_SIZE, &pce_dev->coh_pmem, GFP_KERNEL);
	if (pce_dev->coh_vmem == NULL) {
		*rc = -ENOMEM;
		dev_err(pce_dev->pdev, "Can not allocate coherent memory.\n");
		goto err;
	}

	/* Get CE core clk */
	ce_core_clk = clk_get(pce_dev->pdev, "ce_clk");
	if (IS_ERR(ce_core_clk)) {
		*rc = PTR_ERR(ce_core_clk);
		goto err;
	}
	pce_dev->ce_core_clk = ce_core_clk;
	/* Get CE clk */
	ce_clk = clk_get(pce_dev->pdev, "ce_pclk");
	if (IS_ERR(ce_clk)) {
		*rc = PTR_ERR(ce_clk);
		clk_put(pce_dev->ce_core_clk);
		goto err;
	}
	pce_dev->ce_clk = ce_clk;

	/* Enable CE core clk */
	*rc = clk_enable(pce_dev->ce_core_clk);
	if (*rc) {
		clk_put(pce_dev->ce_core_clk);
		clk_put(pce_dev->ce_clk);
		goto err;
	} else {
		/* Enable CE clk */
		*rc = clk_enable(pce_dev->ce_clk);
		if (*rc) {
			clk_disable(pce_dev->ce_core_clk);
			clk_put(pce_dev->ce_core_clk);
			clk_put(pce_dev->ce_clk);
			goto err;
		}
	}
	_setup_cmd_template(pce_dev);

	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

	if (_init_ce_engine(pce_dev)) {
		*rc = -ENXIO;
		goto err;
	}
	*rc = 0;
	return pce_dev;

err:
	if (pce_dev->coh_vmem)
		dma_free_coherent(pce_dev->pdev, QCE_BUF_SIZE,
			pce_dev->coh_vmem, pce_dev->coh_pmem);
err_dm_chan_cmd:
	kfree(pce_dev->chan_ce_in_cmd);
	kfree(pce_dev->chan_ce_out_cmd);
	if (pce_dev->iobase)
		iounmap(pce_dev->iobase);

err_pce_dev:

	kfree(pce_dev);

	return NULL;
}
EXPORT_SYMBOL(qce_open);

/* crypto engine close function. */
int qce_close(void *handle)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;

	if (handle == NULL)
		return -ENODEV;
	if (pce_dev->iobase)
		iounmap(pce_dev->iobase);

	if (pce_dev->coh_vmem)
		dma_free_coherent(pce_dev->pdev, QCE_BUF_SIZE,
			pce_dev->coh_vmem, pce_dev->coh_pmem);
	clk_disable(pce_dev->ce_clk);
	clk_disable(pce_dev->ce_core_clk);

	clk_put(pce_dev->ce_clk);
	clk_put(pce_dev->ce_core_clk);

	kfree(pce_dev->chan_ce_in_cmd);
	kfree(pce_dev->chan_ce_out_cmd);
	kfree(handle);

	return 0;
}
EXPORT_SYMBOL(qce_close);

int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
{
	if (ce_support == NULL)
		return -EINVAL;

	ce_support->sha1_hmac_20 = false;
	ce_support->sha1_hmac = false;
	ce_support->sha256_hmac = false;
	ce_support->sha_hmac = false;
	ce_support->cmac = true;
	ce_support->aes_key_192 = false;
	ce_support->aes_xts = true;
	ce_support->aes_ccm = true;
	ce_support->ota = false;
	return 0;
}
EXPORT_SYMBOL(qce_hw_support);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
MODULE_DESCRIPTION("Crypto Engine driver");
MODULE_VERSION("2.06");