1/* Qualcomm Crypto Engine driver.
2 *
3 * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 and
7 * only version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14#include <linux/types.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/mod_devicetable.h>
18#include <linux/device.h>
19#include <linux/clk.h>
20#include <linux/err.h>
21#include <linux/dma-mapping.h>
22#include <linux/io.h>
23#include <linux/platform_device.h>
24#include <linux/spinlock.h>
25#include <linux/delay.h>
26#include <linux/crypto.h>
27#include <linux/qcedev.h>
28#include <crypto/hash.h>
29#include <crypto/sha.h>
30#include <mach/dma.h>
31#include <mach/clk.h>
32
33#include "qce.h"
34#include "qcryptohw_40.h"
35
36/* ADM definitions */
37#define LI_SG_CMD (1 << 31) /* last index in the scatter gather cmd */
38#define SRC_INDEX_SG_CMD(index) ((index & 0x3fff) << 16)
39#define DST_INDEX_SG_CMD(index) (index & 0x3fff)
40#define ADM_DESC_LAST (1 << 31)
41
42/* Data xfer between DM and CE in blocks of 16 bytes */
43#define ADM_CE_BLOCK_SIZE 16
44
45#define ADM_DESC_LENGTH_MASK 0xffff
46#define ADM_DESC_LENGTH(x) (x & ADM_DESC_LENGTH_MASK)
47
48struct dmov_desc {
49 uint32_t addr;
50 uint32_t len;
51};
52
53#define ADM_STATUS_OK 0x80000002
54
55/* Misc definitions */
56
57/* QCE max number of descriptor in a descriptor list */
58#define QCE_MAX_NUM_DESC 128
59
60/* QCE BUFFER SIZE */
61#define QCE_BUF_SIZE (2 * PAGE_SIZE)
62
63/* State of DM channel */
64enum qce_chan_st_enum {
65 QCE_CHAN_STATE_IDLE = 0,
66 QCE_CHAN_STATE_IN_PROG = 1,
67 QCE_CHAN_STATE_COMP = 2,
68 QCE_CHAN_STATE_LAST
69};
70
71/*
72 * CE HW device structure.
73 * Each engine has an instance of the structure.
74 * Each engine can only handle one crypto operation at one time. It is up to
75 * the sw above to ensure single threading of operation on an engine.
76 */
77struct qce_device {
78 struct device *pdev; /* Handle to platform_device structure */
79 unsigned char *coh_vmem; /* Allocated coherent virtual memory */
80 dma_addr_t coh_pmem; /* Allocated coherent physical memory */
81 void __iomem *iobase; /* Virtual io base of CE HW */
82 unsigned int phy_iobase; /* Physical io base of CE HW */
83 struct clk *ce_core_clk; /* Handle to CE clk */
84 struct clk *ce_clk; /* Handle to CE clk */
85 unsigned int crci_in; /* CRCI for CE DM IN Channel */
86 unsigned int crci_out; /* CRCI for CE DM OUT Channel */
87 unsigned int chan_ce_in; /* ADM channel used for CE input
88 * and auth result if authentication
89 * only operation. */
90 unsigned int chan_ce_out; /* ADM channel used for CE output,
91 and icv for esp */
92 unsigned int *cmd_pointer_list_ce_in;
93 dma_addr_t phy_cmd_pointer_list_ce_in;
94
95 unsigned int *cmd_pointer_list_ce_out;
96 dma_addr_t phy_cmd_pointer_list_ce_out;
97
98 unsigned char *cmd_list_ce_in;
99 dma_addr_t phy_cmd_list_ce_in;
100
101 unsigned char *cmd_list_ce_out;
102 dma_addr_t phy_cmd_list_ce_out;
103
104 struct dmov_desc *ce_out_src_desc;
105 dma_addr_t phy_ce_out_src_desc;
106
107 struct dmov_desc *ce_out_dst_desc;
108 dma_addr_t phy_ce_out_dst_desc;
109
110 struct dmov_desc *ce_in_src_desc;
111 dma_addr_t phy_ce_in_src_desc;
112
113 struct dmov_desc *ce_in_dst_desc;
114 dma_addr_t phy_ce_in_dst_desc;
115
116 unsigned char *ce_out_ignore;
117 dma_addr_t phy_ce_out_ignore;
118
119 unsigned char *ce_pad;
120 dma_addr_t phy_ce_pad;
121
122 struct msm_dmov_cmd *chan_ce_in_cmd;
123 struct msm_dmov_cmd *chan_ce_out_cmd;
124
125 uint32_t ce_out_ignore_size;
126
127 int ce_out_dst_desc_index;
128 int ce_in_dst_desc_index;
129
130 int ce_out_src_desc_index;
131 int ce_in_src_desc_index;
132
133 enum qce_chan_st_enum chan_ce_in_state; /* chan ce_in state */
134 enum qce_chan_st_enum chan_ce_out_state; /* chan ce_out state */
135
136 int chan_ce_in_status; /* chan ce_in status */
137 int chan_ce_out_status; /* chan ce_out status */
138
139 unsigned char *dig_result;
140 dma_addr_t phy_dig_result;
141
142 /* cached aes key */
143 uint32_t cipher_key[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)];
144
145 uint32_t cipher_key_size; /* cached aes key size in bytes */
146 qce_comp_func_ptr_t qce_cb; /* qce callback function pointer */
147
148 int assoc_nents;
149 int ivsize;
150 int authsize;
151 int src_nents;
152 int dst_nents;
153
154 void *areq;
155 enum qce_cipher_mode_enum mode;
156
157 dma_addr_t phy_iv_in;
158};
159
160/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
161static uint32_t _std_init_vector_sha1[] = {
162 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
163};
164/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
165static uint32_t _std_init_vector_sha256[] = {
166 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
167 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
168};
169
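/*
 * Pack a byte stream into 32-bit words in network (big-endian) order; a
 * trailing partial word is left-justified with the unused low bytes cleared.
 */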
170static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
171 unsigned int len)
172{
173 unsigned n;
174
 175	n = len / sizeof(uint32_t);
176 for (; n > 0; n--) {
177 *iv = ((*b << 24) & 0xff000000) |
178 (((*(b+1)) << 16) & 0xff0000) |
179 (((*(b+2)) << 8) & 0xff00) |
180 (*(b+3) & 0xff);
181 b += sizeof(uint32_t);
182 iv++;
183 }
184
185 n = len % sizeof(uint32_t);
186 if (n == 3) {
187 *iv = ((*b << 24) & 0xff000000) |
188 (((*(b+1)) << 16) & 0xff0000) |
189 (((*(b+2)) << 8) & 0xff00) ;
190 } else if (n == 2) {
191 *iv = ((*b << 24) & 0xff000000) |
192 (((*(b+1)) << 16) & 0xff0000) ;
193 } else if (n == 1) {
194 *iv = ((*b << 24) & 0xff000000) ;
195 }
196}
197
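/*
 * Reverse the byte order of a short IV, right-align it within a full AES
 * block and convert it to network-order words (used to load the XTS tweak).
 */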
198static void _byte_stream_swap_to_net_words(uint32_t *iv, unsigned char *b,
199 unsigned int len)
200{
201 unsigned i, j;
202 unsigned char swap_iv[AES_IV_LENGTH];
203
204 memset(swap_iv, 0, AES_IV_LENGTH);
205 for (i = (AES_IV_LENGTH-len), j = len-1; i < AES_IV_LENGTH; i++, j--)
206 swap_iv[i] = b[j];
207 _byte_stream_to_net_words(iv, swap_iv, AES_IV_LENGTH);
208}
209
210static void _net_words_to_byte_stream(uint32_t *iv, unsigned char *b,
211 unsigned int len)
212{
213 unsigned n = len / sizeof(uint32_t);
214
215 for (; n > 0; n--) {
216 *b++ = (unsigned char) ((*iv >> 24) & 0xff);
217 *b++ = (unsigned char) ((*iv >> 16) & 0xff);
218 *b++ = (unsigned char) ((*iv >> 8) & 0xff);
219 *b++ = (unsigned char) (*iv & 0xff);
220 iv++;
221 }
222 n = len % sizeof(uint32_t);
223 if (n == 3) {
224 *b++ = (unsigned char) ((*iv >> 24) & 0xff);
225 *b++ = (unsigned char) ((*iv >> 16) & 0xff);
226 *b = (unsigned char) ((*iv >> 8) & 0xff);
227 } else if (n == 2) {
228 *b++ = (unsigned char) ((*iv >> 24) & 0xff);
229 *b = (unsigned char) ((*iv >> 16) & 0xff);
230 } else if (n == 1) {
231 *b = (unsigned char) ((*iv >> 24) & 0xff);
232 }
233}
234
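/* Count the scatterlist entries needed to cover nbytes of data. */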
235static int count_sg(struct scatterlist *sg, int nbytes)
236{
237 int i;
238
239 for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
240 nbytes -= sg->length;
241 return i;
242}
243
244static int dma_map_pmem_sg(struct buf_info *pmem, unsigned entries,
245 struct scatterlist *sg)
246{
247 int i;
248 for (i = 0; i < entries; i++) {
249
250 sg->dma_address = (dma_addr_t)pmem->offset;
251 sg++;
252 pmem++;
253 }
254 return 0;
255}
256
257static int _probe_ce_engine(struct qce_device *pce_dev)
258{
259 unsigned int val;
260 unsigned int rev;
261
262 val = readl_relaxed(pce_dev->iobase + CRYPTO_VERSION_REG);
263 if (((val & 0xfffffff) != 0x0000042) &&
264 ((val & 0xfffffff) != 0x0000040)) {
265 dev_err(pce_dev->pdev,
266 "Unknown Qualcomm crypto device at 0x%x 0x%x\n",
267 pce_dev->phy_iobase, val);
268 return -EIO;
269 };
270 rev = (val & CRYPTO_CORE_REV_MASK);
271 if (rev == 0x42) {
272 dev_info(pce_dev->pdev,
273 "Qualcomm Crypto 4.2 device found at 0x%x\n",
274 pce_dev->phy_iobase);
275 } else {
276 if (rev == 0x40) {
277 dev_info(pce_dev->pdev,
278 "Qualcomm Crypto 4.0 device found at 0x%x\n",
279 pce_dev->phy_iobase);
280 }
281 }
282
283 dev_info(pce_dev->pdev,
284 "IO base 0x%x, ce_in channel %d, "
285 "ce_out channel %d, "
286 "crci_in %d, crci_out %d\n",
287 (unsigned int) pce_dev->iobase,
288 pce_dev->chan_ce_in, pce_dev->chan_ce_out,
289 pce_dev->crci_in, pce_dev->crci_out);
290
291 pce_dev->cipher_key_size = 0;
292
293 return 0;
294};
295
296static int _init_ce_engine(struct qce_device *pce_dev)
297{
298 unsigned int val;
299
300 /* Reset ce */
301 clk_reset(pce_dev->ce_core_clk, CLK_RESET_ASSERT);
302 clk_reset(pce_dev->ce_core_clk, CLK_RESET_DEASSERT);
303 /*
 304	 * Ensure the previous writes to the CLK registers that toggle
 305	 * the reset lines have completed.
306 */
307 dsb();
308 /* configure ce */
309 val = (1 << CRYPTO_MASK_DOUT_INTR) | (1 << CRYPTO_MASK_DIN_INTR) |
310 (1 << CRYPTO_MASK_OP_DONE_INTR) |
311 (1 << CRYPTO_MASK_ERR_INTR);
312 writel_relaxed(val, pce_dev->iobase + CRYPTO_CONFIG_REG);
313 /*
314 * Ensure previous instruction (writel_relaxed to config register bit)
315 * was completed.
316 */
317 dsb();
318 val = readl_relaxed(pce_dev->iobase + CRYPTO_CONFIG_REG);
319 if (!val) {
320 dev_err(pce_dev->pdev,
321 "unknown Qualcomm crypto device at 0x%x\n",
322 pce_dev->phy_iobase);
323 return -EIO;
324 };
325 if (_probe_ce_engine(pce_dev) < 0)
326 return -EIO;
327 return 0;
328};
329
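/*
 * Program the auth registers for a SHA-1/SHA-256, HMAC or AES-CMAC request:
 * write the key (if any), the initial/intermediate digest and byte counts,
 * and the segment sizes, then kick the engine via the GO bit.
 */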
330static int _ce_setup_hash(struct qce_device *pce_dev, struct qce_sha_req *sreq)
331{
332 uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
333 uint32_t diglen;
334 int i;
335 uint32_t auth_cfg = 0;
336 bool sha1 = false;
337
338 if (sreq->alg == QCE_HASH_AES_CMAC) {
339 uint32_t authkey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
340 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
341 uint32_t authklen32 = sreq->authklen/(sizeof(uint32_t));
342 /* Clear auth_ivn, auth_keyn registers */
343 for (i = 0; i < 16; i++) {
344 writel_relaxed(0, (pce_dev->iobase +
345 (CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t))));
346 writel_relaxed(0, (pce_dev->iobase +
347 (CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t))));
348 }
349 /* write auth_bytecnt 0/1/2/3, start with 0 */
350 for (i = 0; i < 4; i++)
351 writel_relaxed(0, pce_dev->iobase +
352 CRYPTO_AUTH_BYTECNT0_REG +
353 i * sizeof(uint32_t));
354
355 _byte_stream_to_net_words(authkey32, sreq->authkey,
356 sreq->authklen);
357 for (i = 0; i < authklen32; i++)
358 writel_relaxed(authkey32[i], pce_dev->iobase +
359 CRYPTO_AUTH_KEY0_REG + (i * sizeof(uint32_t)));
360 /*
361 * write seg_cfg
362 */
363 auth_cfg |= (1 << CRYPTO_LAST);
364 auth_cfg |= (CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE);
365 auth_cfg |= (CRYPTO_AUTH_SIZE_ENUM_16_BYTES <<
366 CRYPTO_AUTH_SIZE);
367 auth_cfg |= CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG;
368
369 switch (sreq->authklen) {
370 case AES128_KEY_SIZE:
371 auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES128 <<
372 CRYPTO_AUTH_KEY_SIZE);
373 break;
374 case AES256_KEY_SIZE:
375 auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES256 <<
376 CRYPTO_AUTH_KEY_SIZE);
377 break;
378 default:
379 break;
380 }
381
382 goto go_proc;
383 }
384
385 /* if not the last, the size has to be on the block boundary */
386 if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
387 return -EIO;
388
389 switch (sreq->alg) {
390 case QCE_HASH_SHA1:
391 case QCE_HASH_SHA1_HMAC:
392 diglen = SHA1_DIGEST_SIZE;
393 sha1 = true;
394 break;
395 case QCE_HASH_SHA256:
396 case QCE_HASH_SHA256_HMAC:
397 diglen = SHA256_DIGEST_SIZE;
398 break;
399 default:
400 return -EINVAL;
401 }
402
403 if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
404 (sreq->alg == QCE_HASH_SHA256_HMAC)) {
405 uint32_t hmackey[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
406 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
407 uint32_t hmacklen = sreq->authklen/(sizeof(uint32_t));
408
409 _byte_stream_to_net_words(hmackey, sreq->authkey,
410 sreq->authklen);
411 /* write hmac key */
412 for (i = 0; i < hmacklen; i++)
413 writel_relaxed(hmackey[i], pce_dev->iobase +
414 CRYPTO_AUTH_KEY0_REG + (i * sizeof(uint32_t)));
415
416 auth_cfg |= (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE);
417 } else {
418 auth_cfg |= (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE);
419 }
420
421 /* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
422
423 if (sreq->first_blk) {
424 if (sha1) {
425 for (i = 0; i < 5; i++)
426 auth32[i] = _std_init_vector_sha1[i];
427 } else {
428 for (i = 0; i < 8; i++)
429 auth32[i] = _std_init_vector_sha256[i];
430 }
431 } else {
432 _byte_stream_to_net_words(auth32, sreq->digest, diglen);
433 }
434
435 for (i = 0; i < 5; i++)
436 writel_relaxed(auth32[i], (pce_dev->iobase +
437 (CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t))));
438
439 if ((sreq->alg == QCE_HASH_SHA256) ||
440 (sreq->alg == QCE_HASH_SHA256_HMAC)) {
441 writel_relaxed(auth32[5], pce_dev->iobase +
442 CRYPTO_AUTH_IV5_REG);
443 writel_relaxed(auth32[6], pce_dev->iobase +
444 CRYPTO_AUTH_IV6_REG);
445 writel_relaxed(auth32[7], pce_dev->iobase +
446 CRYPTO_AUTH_IV7_REG);
447 }
448
 449	/* write auth_bytecnt 0/1/2/3 with the running byte count (0 for the first block) */
450 for (i = 0; i < 4; i++)
451 writel_relaxed(sreq->auth_data[i], (pce_dev->iobase +
452 (CRYPTO_AUTH_BYTECNT0_REG + i * sizeof(uint32_t))));
453
454 /* write seg_cfg */
455 if (sha1)
456 auth_cfg |= (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE);
457 else
458 auth_cfg |= (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE);
459
460 if (sreq->last_blk)
461 auth_cfg |= 1 << CRYPTO_LAST;
462
463 auth_cfg |= CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG;
464
465go_proc:
466 auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
467
468 /* write seg_cfg */
469 writel_relaxed(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
470
471 /* write seg_size */
472 writel_relaxed(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
473
474 /* write auth_seg_size */
475 writel_relaxed(sreq->size, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
476
477 /* write auth_seg_start */
478 writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
479 /*
 480	 * Ensure previous instructions (writes to all AUTH registers)
 481	 * have completed before accessing a register that is not
 482	 * in the same 1K range.
483 */
484 dsb();
485
486 writel_relaxed(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
487 /*
488 * Ensure previous instructions (setting all the CE registers)
 489	 * have completed before writing to the GO register
490 */
491 dsb();
492 /* issue go to crypto */
493 writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
494 /*
495 * Ensure previous instructions (setting the GO register)
 496	 * have completed before issuing a DMA transfer request
497 */
498 dsb();
499
500 return 0;
501}
502
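/*
 * Program the cipher (and, for CCM, the auth) registers for a request:
 * select algorithm, mode and key size, load the key and IV (or select the
 * HW key for an all-zero AES key), set the segment sizes and offset, then
 * kick the engine via the GO bit.
 */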
503static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
504 uint32_t totallen_in, uint32_t coffset)
505{
506 uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
507 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
508 uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
509 0, 0, 0, 0};
510 uint32_t enck_size_in_word = creq->encklen / sizeof(uint32_t);
511 int aes_key_chg;
512 int i;
513 uint32_t encr_cfg = 0;
514 uint32_t ivsize = creq->ivsize;
515
516 if (creq->mode == QCE_MODE_XTS)
517 _byte_stream_to_net_words(enckey32, creq->enckey,
518 creq->encklen/2);
519 else
520 _byte_stream_to_net_words(enckey32, creq->enckey,
521 creq->encklen);
522
523 if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
524 uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
525 uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
526 uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
527 uint32_t auth_cfg = 0;
528
529 /* Clear auth_ivn, auth_keyn registers */
530 for (i = 0; i < 16; i++) {
531 writel_relaxed(0, (pce_dev->iobase +
532 (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
533 writel_relaxed(0, (pce_dev->iobase +
534 (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
535 }
536 /* write auth_bytecnt 0/1/2/3, start with 0 */
537 for (i = 0; i < 4; i++)
538 writel_relaxed(0, pce_dev->iobase +
539 CRYPTO_AUTH_BYTECNT0_REG +
540 i * sizeof(uint32_t));
541 /* write auth key */
542 for (i = 0; i < authklen32; i++)
543 writel_relaxed(enckey32[i], pce_dev->iobase +
544 CRYPTO_AUTH_KEY0_REG + (i*sizeof(uint32_t)));
545
546 /* write nonce */
547 _byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
548 for (i = 0; i < noncelen32; i++)
549 writel_relaxed(nonce32[i], pce_dev->iobase +
550 CRYPTO_AUTH_INFO_NONCE0_REG +
551 (i*sizeof(uint32_t)));
552
553 auth_cfg |= (noncelen32 << CRYPTO_AUTH_NONCE_NUM_WORDS);
554 auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
555 auth_cfg |= (1 << CRYPTO_LAST);
556 if (creq->dir == QCE_ENCRYPT)
557 auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
558 else
559 auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
560 auth_cfg |= (((creq->authsize >> 1) - 2) << CRYPTO_AUTH_SIZE);
561 auth_cfg |= (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE);
562 if (creq->authklen == AES128_KEY_SIZE)
563 auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES128 <<
564 CRYPTO_AUTH_KEY_SIZE);
565 else {
566 if (creq->authklen == AES256_KEY_SIZE)
567 auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES256 <<
568 CRYPTO_AUTH_KEY_SIZE);
569 }
570 auth_cfg |= (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG);
571 writel_relaxed(auth_cfg, pce_dev->iobase +
572 CRYPTO_AUTH_SEG_CFG_REG);
573 if (creq->dir == QCE_ENCRYPT)
574 writel_relaxed(totallen_in, pce_dev->iobase +
575 CRYPTO_AUTH_SEG_SIZE_REG);
576 else
577 writel_relaxed((totallen_in - creq->authsize),
578 pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
579 writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
580 } else {
581 writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
582 }
583 /*
 584	 * Ensure previous instructions (writes to all AUTH registers)
 585	 * have completed before accessing a register that is not
 586	 * in the same 1K range.
587 */
588 dsb();
589
590 switch (creq->mode) {
591 case QCE_MODE_ECB:
592 encr_cfg |= (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
593 break;
594
595 case QCE_MODE_CBC:
596 encr_cfg |= (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
597 break;
598
599 case QCE_MODE_XTS:
600 encr_cfg |= (CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
601 break;
602
603 case QCE_MODE_CCM:
604 encr_cfg |= (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE);
605 break;
606
607 case QCE_MODE_CTR:
608 default:
609 encr_cfg |= (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
610 break;
611 }
612 pce_dev->mode = creq->mode;
613
614 switch (creq->alg) {
615 case CIPHER_ALG_DES:
616 if (creq->mode != QCE_MODE_ECB) {
617 _byte_stream_to_net_words(enciv32, creq->iv, ivsize);
618 writel_relaxed(enciv32[0], pce_dev->iobase +
619 CRYPTO_CNTR0_IV0_REG);
620 writel_relaxed(enciv32[1], pce_dev->iobase +
621 CRYPTO_CNTR1_IV1_REG);
622 }
623 writel_relaxed(enckey32[0], pce_dev->iobase +
624 CRYPTO_ENCR_KEY0_REG);
625 writel_relaxed(enckey32[1], pce_dev->iobase +
626 CRYPTO_ENCR_KEY1_REG);
627 encr_cfg |= ((CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
628 (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
629 break;
630
631 case CIPHER_ALG_3DES:
632 if (creq->mode != QCE_MODE_ECB) {
633 _byte_stream_to_net_words(enciv32, creq->iv, ivsize);
634 writel_relaxed(enciv32[0], pce_dev->iobase +
635 CRYPTO_CNTR0_IV0_REG);
636 writel_relaxed(enciv32[1], pce_dev->iobase +
637 CRYPTO_CNTR1_IV1_REG);
638 }
639 for (i = 0; i < 6; i++)
 640			writel_relaxed(enckey32[i], (pce_dev->iobase +
641 (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t))));
642
643 encr_cfg |= ((CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
644 (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
645 break;
646
647 case CIPHER_ALG_AES:
648 default:
649 if (creq->mode == QCE_MODE_XTS) {
650 uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
651 = {0, 0, 0, 0, 0, 0, 0, 0};
652 uint32_t xtsklen =
653 creq->encklen/(2 * sizeof(uint32_t));
654
655 _byte_stream_to_net_words(xtskey32, (creq->enckey +
656 creq->encklen/2), creq->encklen/2);
657 for (i = 0; i < xtsklen; i++)
658 writel_relaxed(xtskey32[i], pce_dev->iobase +
659 CRYPTO_ENCR_XTS_KEY0_REG +
660 (i * sizeof(uint32_t)));
661
 662			writel_relaxed(creq->cryptlen,
663 pce_dev->iobase +
664 CRYPTO_ENCR_XTS_DU_SIZE_REG);
665 }
666 if (creq->mode != QCE_MODE_ECB) {
667 if (creq->mode == QCE_MODE_XTS)
668 _byte_stream_swap_to_net_words(enciv32,
669 creq->iv, ivsize);
670 else
671 _byte_stream_to_net_words(enciv32, creq->iv,
672 ivsize);
673 for (i = 0; i <= 3; i++)
674 writel_relaxed(enciv32[i], pce_dev->iobase +
675 CRYPTO_CNTR0_IV0_REG +
676 (i * sizeof(uint32_t)));
677 }
678 /* set number of counter bits */
679 writel_relaxed(0xffffffff, pce_dev->iobase +
680 CRYPTO_CNTR_MASK_REG);
681
682 if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
683 encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
684 CRYPTO_ENCR_KEY_SZ);
685 encr_cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
686 } else {
687 uint32_t key_size;
688
689 if (creq->mode == QCE_MODE_XTS) {
690 key_size = creq->encklen/2;
691 enck_size_in_word = key_size/sizeof(uint32_t);
692 } else {
693 key_size = creq->encklen;
694 }
695
696 switch (key_size) {
697 case AES128_KEY_SIZE:
698 encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
699 CRYPTO_ENCR_KEY_SZ);
700 break;
701 case AES256_KEY_SIZE:
702 default:
703 encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES256 <<
704 CRYPTO_ENCR_KEY_SZ);
705
706 /* check for null key. If null, use hw key*/
707 for (i = 0; i < enck_size_in_word; i++) {
708 if (enckey32[i] != 0)
709 break;
710 }
711 if (i == enck_size_in_word)
712 encr_cfg |= 1 << CRYPTO_USE_HW_KEY;
713 break;
714 } /* end of switch (creq->encklen) */
715
716 encr_cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
717 if (pce_dev->cipher_key_size != creq->encklen)
718 aes_key_chg = 1;
719 else {
720 for (i = 0; i < enck_size_in_word; i++) {
721 if (enckey32[i]
722 != pce_dev->cipher_key[i])
723 break;
724 }
725 aes_key_chg = (i == enck_size_in_word) ? 0 : 1;
726 }
727
728 if (aes_key_chg) {
729 for (i = 0; i < enck_size_in_word; i++)
730 writel_relaxed(enckey32[i],
731 pce_dev->iobase +
732 CRYPTO_ENCR_KEY0_REG +
733 (i * sizeof(uint32_t)));
734 pce_dev->cipher_key_size = creq->encklen;
735 for (i = 0; i < enck_size_in_word; i++)
736 pce_dev->cipher_key[i] = enckey32[i];
737 } /*if (aes_key_chg) { */
738 } /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
739 break;
740 } /* end of switch (creq->mode) */
741
 742	/* set the encode bit for encryption, clear it for decryption */
743 encr_cfg |= ((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
744
745 /* write encr seg cfg */
746 writel_relaxed(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
747
748 /* write encr seg size */
749 if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT))
750 writel_relaxed((creq->cryptlen + creq->authsize),
751 pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
752 else
753 writel_relaxed(creq->cryptlen,
754 pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
755 /* write encr seg start */
756 writel_relaxed((coffset & 0xffff),
757 pce_dev->iobase + CRYPTO_ENCR_SEG_START_REG);
758 /* write seg size */
759 writel_relaxed(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
760 /*
761 * Ensure previous instructions (setting all the CE registers)
 762	 * have completed before writing to the GO register
763 */
764 dsb();
765 /* issue go to crypto */
766 writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
767 /*
768 * Ensure previous instructions (setting the GO register)
 769	 * have completed before issuing a DMA transfer request
770 */
771 dsb();
772 return 0;
773};
774
775static int _aead_complete(struct qce_device *pce_dev)
776{
777 struct aead_request *areq;
778 int i;
779 uint32_t ivsize;
780 uint32_t iv_out[4];
781 unsigned char iv[4 * sizeof(uint32_t)];
782
783 areq = (struct aead_request *) pce_dev->areq;
784 ivsize = pce_dev->ivsize;
785
786 if (areq->src != areq->dst) {
787 dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
788 DMA_FROM_DEVICE);
789 }
790 dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
791 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
792 DMA_TO_DEVICE);
793
794 if (pce_dev->mode != QCE_MODE_CCM)
795 dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
796 ivsize, DMA_TO_DEVICE);
797 dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
798 DMA_TO_DEVICE);
799
800 /* get iv out */
801 if ((pce_dev->mode == QCE_MODE_ECB) ||
802 (pce_dev->mode == QCE_MODE_CCM)) {
803 if (pce_dev->mode == QCE_MODE_CCM) {
804 int result;
805 result = readl_relaxed(pce_dev->iobase +
806 CRYPTO_STATUS_REG);
807 result &= (1 << CRYPTO_MAC_FAILED);
808 result |= (pce_dev->chan_ce_in_status |
809 pce_dev->chan_ce_out_status);
810 dsb();
811 pce_dev->qce_cb(areq, pce_dev->dig_result, NULL,
812 result);
813 } else {
814 pce_dev->qce_cb(areq, pce_dev->dig_result, NULL,
815 pce_dev->chan_ce_in_status |
816 pce_dev->chan_ce_out_status);
817 }
818 } else {
819 for (i = 0; i < 4; i++)
820 iv_out[i] = readl_relaxed(pce_dev->iobase +
821 (CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)));
822
823 _net_words_to_byte_stream(iv_out, iv, sizeof(iv));
824 pce_dev->qce_cb(areq, pce_dev->dig_result, iv,
825 pce_dev->chan_ce_in_status |
826 pce_dev->chan_ce_out_status);
827 };
828 return 0;
829};
830
831static void _sha_complete(struct qce_device *pce_dev)
832{
833
834 struct ahash_request *areq;
835 uint32_t auth_data[4];
836 uint32_t digest[8];
837 int i;
838
839 areq = (struct ahash_request *) pce_dev->areq;
840 dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
841 DMA_TO_DEVICE);
842
843 for (i = 0; i < 4; i++)
844 auth_data[i] = readl_relaxed(pce_dev->iobase +
845 (CRYPTO_AUTH_BYTECNT0_REG +
846 i * sizeof(uint32_t)));
847
848 for (i = 0; i < 8; i++)
849 digest[i] = readl_relaxed(pce_dev->iobase +
850 CRYPTO_AUTH_IV0_REG + (i * sizeof(uint32_t)));
851
852 _net_words_to_byte_stream(digest, pce_dev->dig_result,
853 SHA256_DIGEST_SIZE);
854
855 pce_dev->qce_cb(areq, pce_dev->dig_result, (unsigned char *)auth_data,
856 pce_dev->chan_ce_in_status);
857};
858
859static int _ablk_cipher_complete(struct qce_device *pce_dev)
860{
861 struct ablkcipher_request *areq;
862 uint32_t iv_out[4];
863 unsigned char iv[4 * sizeof(uint32_t)];
864
865 areq = (struct ablkcipher_request *) pce_dev->areq;
866
867 if (areq->src != areq->dst) {
868 dma_unmap_sg(pce_dev->pdev, areq->dst,
869 pce_dev->dst_nents, DMA_FROM_DEVICE);
870 }
871 dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
872 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
873 DMA_TO_DEVICE);
874 /* get iv out */
875 if (pce_dev->mode == QCE_MODE_ECB) {
876 pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status |
877 pce_dev->chan_ce_out_status);
878 } else {
879 int i;
880
881 for (i = 0; i < 4; i++)
882 iv_out[i] = readl_relaxed(pce_dev->iobase +
883 (CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)));
884
885 _net_words_to_byte_stream(iv_out, iv, sizeof(iv));
886 pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status |
887 pce_dev->chan_ce_out_status);
888 }
889
890 return 0;
891};
892
893static int _ablk_cipher_use_pmem_complete(struct qce_device *pce_dev)
894{
895 struct ablkcipher_request *areq;
896 uint32_t iv_out[4];
897 unsigned char iv[4 * sizeof(uint32_t)];
898
899 areq = (struct ablkcipher_request *) pce_dev->areq;
900
901 /* get iv out */
902 if (pce_dev->mode == QCE_MODE_ECB) {
903 pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status |
904 pce_dev->chan_ce_out_status);
905 } else {
906 int i;
907
908 for (i = 0; i < 4; i++)
909 iv_out[i] = readl_relaxed(pce_dev->iobase +
910 CRYPTO_CNTR0_IV0_REG + (i * sizeof(uint32_t)));
911
912 _net_words_to_byte_stream(iv_out, iv, sizeof(iv));
913 pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status |
914 pce_dev->chan_ce_out_status);
915 }
916
917 return 0;
918};
919
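/*
 * An ADM descriptor can move at most 0x8000 bytes; split a larger transfer
 * across consecutive descriptors, advancing *index for each extra one used.
 */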
920static int qce_split_and_insert_dm_desc(struct dmov_desc *pdesc,
921 unsigned int plen, unsigned int paddr, int *index)
922{
923 while (plen > 0x8000) {
924 pdesc->len = 0x8000;
925 if (paddr > 0) {
926 pdesc->addr = paddr;
927 paddr += 0x8000;
928 }
929 plen -= pdesc->len;
930 if (plen > 0) {
931 *index = (*index) + 1;
932 if ((*index) >= QCE_MAX_NUM_DESC)
933 return -ENOMEM;
934 pdesc++;
935 }
936 }
937 if ((plen > 0) && (plen <= 0x8000)) {
938 pdesc->len = plen;
939 if (paddr > 0)
940 pdesc->addr = paddr;
941 }
942
943 return 0;
944}
945
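/*
 * Chain a DMA-mapped scatterlist into the ce_in channel: the destination is
 * the CE data shadow area (sized to the total), and source descriptors are
 * built per chunk, merging contiguous chunks and splitting any chunk that
 * exceeds the 0x8000-byte ADM limit.
 */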
946static int _chain_sg_buffer_in(struct qce_device *pce_dev,
947 struct scatterlist *sg, unsigned int nbytes)
948{
949 unsigned int len;
950 unsigned int dlen;
951 struct dmov_desc *pdesc;
952
953 pdesc = pce_dev->ce_in_dst_desc + pce_dev->ce_in_dst_desc_index;
954 if (nbytes > 0x8000)
955 qce_split_and_insert_dm_desc(pdesc, nbytes, 0,
956 &pce_dev->ce_in_dst_desc_index);
957 else
958 pdesc->len = nbytes;
959
960 pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
961 /*
 962	 * Two consecutive, physically contiguous chunks may be merged
 963	 * into the same buffer descriptor.
964 */
965 while (nbytes > 0) {
966 len = min(nbytes, sg_dma_len(sg));
967 dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
968 nbytes -= len;
969 if (dlen == 0) {
970 pdesc->addr = sg_dma_address(sg);
971 pdesc->len = len;
972 if (pdesc->len > 0x8000)
973 qce_split_and_insert_dm_desc(pdesc, pdesc->len,
974 sg_dma_address(sg),
975 &pce_dev->ce_in_src_desc_index);
976 } else if (sg_dma_address(sg) == (pdesc->addr + dlen)) {
977 pdesc->len = dlen + len;
978 if (pdesc->len > 0x8000)
979 qce_split_and_insert_dm_desc(pdesc, pdesc->len,
980 pdesc->addr,
981 &pce_dev->ce_in_src_desc_index);
982 } else {
983 pce_dev->ce_in_src_desc_index++;
984 if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
985 return -ENOMEM;
986 pdesc++;
987 pdesc->len = len;
988 pdesc->addr = sg_dma_address(sg);
989 if (pdesc->len > 0x8000)
990 qce_split_and_insert_dm_desc(pdesc, pdesc->len,
991 sg_dma_address(sg),
992 &pce_dev->ce_in_src_desc_index);
993 }
994 if (nbytes > 0)
995 sg = sg_next(sg);
996 }
997 return 0;
998}
999
1000static int _chain_pm_buffer_in(struct qce_device *pce_dev,
1001 unsigned int pmem, unsigned int nbytes)
1002{
1003 unsigned int dlen;
1004 struct dmov_desc *pdesc;
1005
1006 pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
1007 dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
1008 if (dlen == 0) {
1009 pdesc->addr = pmem;
1010 pdesc->len = nbytes;
1011 } else if (pmem == (pdesc->addr + dlen)) {
1012 pdesc->len = dlen + nbytes;
1013 } else {
1014 pce_dev->ce_in_src_desc_index++;
1015 if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
1016 return -ENOMEM;
1017 pdesc++;
1018 pdesc->len = nbytes;
1019 pdesc->addr = pmem;
1020 }
1021 pdesc = pce_dev->ce_in_dst_desc + pce_dev->ce_in_dst_desc_index;
1022 pdesc->len += nbytes;
1023
1024 return 0;
1025}
1026
1027static void _chain_buffer_in_init(struct qce_device *pce_dev)
1028{
1029 struct dmov_desc *pdesc;
1030
1031 pce_dev->ce_in_src_desc_index = 0;
1032 pce_dev->ce_in_dst_desc_index = 0;
1033 pdesc = pce_dev->ce_in_src_desc;
1034 pdesc->len = 0;
1035}
1036
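/*
 * Close out the ce_in descriptor lists: flag the last source and destination
 * descriptors (optionally forcing the destination length to the padded
 * total) and mark the scatter-gather command as the last in the list.
 */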
1037static void _ce_in_final(struct qce_device *pce_dev, unsigned total)
1038{
1039 struct dmov_desc *pdesc;
1040 dmov_sg *pcmd;
1041
1042 pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
1043 pdesc->len |= ADM_DESC_LAST;
1044 pdesc = pce_dev->ce_in_dst_desc + pce_dev->ce_in_dst_desc_index;
1045	if (total)
1046 pdesc->len = ADM_DESC_LAST | total;
1047 else
1048 pdesc->len |= ADM_DESC_LAST;
1049
1050 pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in;
1051 pcmd->cmd |= CMD_LC;
1052}
1053
1054#ifdef QCE_DEBUG
1055static void _ce_in_dump(struct qce_device *pce_dev)
1056{
1057 int i;
1058 struct dmov_desc *pdesc;
1059
1060 dev_info(pce_dev->pdev, "_ce_in_dump: src\n");
1061 for (i = 0; i <= pce_dev->ce_in_src_desc_index; i++) {
1062 pdesc = pce_dev->ce_in_src_desc + i;
1063 dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
1064 pdesc->len);
1065 }
1066 dev_info(pce_dev->pdev, "_ce_in_dump: dst\n");
1067 for (i = 0; i <= pce_dev->ce_in_dst_desc_index; i++) {
1068 pdesc = pce_dev->ce_in_dst_desc + i;
1069 dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
1070 pdesc->len);
1071 }
1072};
1073
1074static void _ce_out_dump(struct qce_device *pce_dev)
1075{
1076 int i;
1077 struct dmov_desc *pdesc;
1078
1079 dev_info(pce_dev->pdev, "_ce_out_dump: src\n");
1080 for (i = 0; i <= pce_dev->ce_out_src_desc_index; i++) {
1081 pdesc = pce_dev->ce_out_src_desc + i;
1082 dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
1083 pdesc->len);
1084 }
1085
1086 dev_info(pce_dev->pdev, "_ce_out_dump: dst\n");
1087 for (i = 0; i <= pce_dev->ce_out_dst_desc_index; i++) {
1088 pdesc = pce_dev->ce_out_dst_desc + i;
1089 dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
1090 pdesc->len);
1091 }
1092};
1093
1094#else
1095
1096static void _ce_in_dump(struct qce_device *pce_dev)
1097{
1098};
1099
1100static void _ce_out_dump(struct qce_device *pce_dev)
1101{
1102};
1103
1104#endif
1105
1106static int _chain_sg_buffer_out(struct qce_device *pce_dev,
1107 struct scatterlist *sg, unsigned int nbytes)
1108{
1109 unsigned int len;
1110 unsigned int dlen;
1111 struct dmov_desc *pdesc;
1112
1113 pdesc = pce_dev->ce_out_src_desc + pce_dev->ce_out_src_desc_index;
1114 if (nbytes > 0x8000)
1115 qce_split_and_insert_dm_desc(pdesc, nbytes, 0,
1116 &pce_dev->ce_out_src_desc_index);
1117 else
1118 pdesc->len = nbytes;
1119
1120 pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
1121 /*
 1122	 * Two consecutive, physically contiguous chunks may be merged
 1123	 * into the same buffer descriptor.
1124 */
1125 while (nbytes > 0) {
1126 len = min(nbytes, sg_dma_len(sg));
1127 dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
1128 nbytes -= len;
1129 if (dlen == 0) {
1130 pdesc->addr = sg_dma_address(sg);
1131 pdesc->len = len;
1132 if (pdesc->len > 0x8000)
1133 qce_split_and_insert_dm_desc(pdesc, pdesc->len,
1134 sg_dma_address(sg),
1135 &pce_dev->ce_out_dst_desc_index);
1136 } else if (sg_dma_address(sg) == (pdesc->addr + dlen)) {
1137 pdesc->len = dlen + len;
1138 if (pdesc->len > 0x8000)
1139 qce_split_and_insert_dm_desc(pdesc, pdesc->len,
1140 pdesc->addr,
1141 &pce_dev->ce_out_dst_desc_index);
1142
1143 } else {
1144 pce_dev->ce_out_dst_desc_index++;
1145 if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
1146 return -EIO;
1147 pdesc++;
1148 pdesc->len = len;
1149 pdesc->addr = sg_dma_address(sg);
1150 if (pdesc->len > 0x8000)
1151 qce_split_and_insert_dm_desc(pdesc, pdesc->len,
1152 sg_dma_address(sg),
1153 &pce_dev->ce_out_dst_desc_index);
1154
1155 }
1156 if (nbytes > 0)
1157 sg = sg_next(sg);
1158 }
1159 return 0;
1160}
1161
1162static int _chain_pm_buffer_out(struct qce_device *pce_dev,
1163 unsigned int pmem, unsigned int nbytes)
1164{
1165 unsigned int dlen;
1166 struct dmov_desc *pdesc;
1167
1168 pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
1169 dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
1170
1171 if (dlen == 0) {
1172 pdesc->addr = pmem;
1173 pdesc->len = nbytes;
1174 } else if (pmem == (pdesc->addr + dlen)) {
1175 pdesc->len = dlen + nbytes;
1176 } else {
1177 pce_dev->ce_out_dst_desc_index++;
1178 if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
1179 return -EIO;
1180 pdesc++;
1181 pdesc->len = nbytes;
1182 pdesc->addr = pmem;
1183 }
1184 pdesc = pce_dev->ce_out_src_desc + pce_dev->ce_out_src_desc_index;
1185 pdesc->len += nbytes;
1186
1187 return 0;
1188};
1189
1190static void _chain_buffer_out_init(struct qce_device *pce_dev)
1191{
1192 struct dmov_desc *pdesc;
1193
1194 pce_dev->ce_out_dst_desc_index = 0;
1195 pce_dev->ce_out_src_desc_index = 0;
1196 pdesc = pce_dev->ce_out_dst_desc;
1197 pdesc->len = 0;
1198};
1199
1200static void _ce_out_final(struct qce_device *pce_dev, unsigned total)
1201{
1202 struct dmov_desc *pdesc;
1203 dmov_sg *pcmd;
1204
1205 pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
1206 pdesc->len |= ADM_DESC_LAST;
1207 pdesc = pce_dev->ce_out_src_desc + pce_dev->ce_out_src_desc_index;
1208	if (total)
1209 pdesc->len = ADM_DESC_LAST | total;
1210 else
1211 pdesc->len |= ADM_DESC_LAST;
1212	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out;
1213 pcmd->cmd |= CMD_LC;
1214};
1215
1216static void _aead_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
1217 unsigned int result, struct msm_dmov_errdata *err)
1218{
1219 struct qce_device *pce_dev;
1220
1221 pce_dev = (struct qce_device *) cmd_ptr->user;
1222 if (result != ADM_STATUS_OK) {
1223 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1224 result);
1225 pce_dev->chan_ce_in_status = -1;
1226 } else {
1227 pce_dev->chan_ce_in_status = 0;
1228 }
1229
1230 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
1231 if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
1232 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1233 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1234
1235 /* done */
1236 _aead_complete(pce_dev);
1237 }
1238};
1239
1240static void _aead_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
1241 unsigned int result, struct msm_dmov_errdata *err)
1242{
1243 struct qce_device *pce_dev;
1244
1245 pce_dev = (struct qce_device *) cmd_ptr->user;
1246 if (result != ADM_STATUS_OK) {
1247 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1248 result);
1249 pce_dev->chan_ce_out_status = -1;
1250 } else {
1251 pce_dev->chan_ce_out_status = 0;
1252 };
1253
1254 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
1255 if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
1256 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1257 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1258
1259 /* done */
1260 _aead_complete(pce_dev);
1261 }
1262
1263};
1264
1265static void _sha_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
1266 unsigned int result, struct msm_dmov_errdata *err)
1267{
1268 struct qce_device *pce_dev;
1269
1270 pce_dev = (struct qce_device *) cmd_ptr->user;
1271 if (result != ADM_STATUS_OK) {
1272 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1273 result);
1274 pce_dev->chan_ce_in_status = -1;
1275 } else {
1276 pce_dev->chan_ce_in_status = 0;
1277 }
1278 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1279 _sha_complete(pce_dev);
1280};
1281
1282static void _ablk_cipher_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
1283 unsigned int result, struct msm_dmov_errdata *err)
1284{
1285 struct qce_device *pce_dev;
1286
1287 pce_dev = (struct qce_device *) cmd_ptr->user;
1288 if (result != ADM_STATUS_OK) {
1289 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1290 result);
1291 pce_dev->chan_ce_in_status = -1;
1292 } else {
1293 pce_dev->chan_ce_in_status = 0;
1294 }
1295
1296 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
1297 if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
1298 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1299 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1300
1301 /* done */
1302 _ablk_cipher_complete(pce_dev);
1303 }
1304};
1305
1306static void _ablk_cipher_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
1307 unsigned int result, struct msm_dmov_errdata *err)
1308{
1309 struct qce_device *pce_dev;
1310
1311 pce_dev = (struct qce_device *) cmd_ptr->user;
1312 if (result != ADM_STATUS_OK) {
1313 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1314 result);
1315 pce_dev->chan_ce_out_status = -1;
1316 } else {
1317 pce_dev->chan_ce_out_status = 0;
1318 };
1319
1320 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
1321 if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
1322 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1323 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1324
1325 /* done */
1326 _ablk_cipher_complete(pce_dev);
1327 }
1328};
1329
1330
1331static void _ablk_cipher_ce_in_call_back_pmem(struct msm_dmov_cmd *cmd_ptr,
1332 unsigned int result, struct msm_dmov_errdata *err)
1333{
1334 struct qce_device *pce_dev;
1335
1336 pce_dev = (struct qce_device *) cmd_ptr->user;
1337 if (result != ADM_STATUS_OK) {
1338 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1339 result);
1340 pce_dev->chan_ce_in_status = -1;
1341 } else {
1342 pce_dev->chan_ce_in_status = 0;
1343 }
1344
1345 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
1346 if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
1347 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1348 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1349
1350 /* done */
1351 _ablk_cipher_use_pmem_complete(pce_dev);
1352 }
1353};
1354
1355static void _ablk_cipher_ce_out_call_back_pmem(struct msm_dmov_cmd *cmd_ptr,
1356 unsigned int result, struct msm_dmov_errdata *err)
1357{
1358 struct qce_device *pce_dev;
1359
1360 pce_dev = (struct qce_device *) cmd_ptr->user;
1361 if (result != ADM_STATUS_OK) {
1362 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1363 result);
1364 pce_dev->chan_ce_out_status = -1;
1365 } else {
1366 pce_dev->chan_ce_out_status = 0;
1367 };
1368
1369 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
1370 if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
1371 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1372 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1373
1374 /* done */
1375 _ablk_cipher_use_pmem_complete(pce_dev);
1376 }
1377};
1378
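/*
 * Carve the coherent buffer into descriptor arrays, command lists, digest
 * result, pad and throw-away areas, and pre-build the ADM scatter-gather
 * command templates for the ce_in and ce_out channels; per-request code
 * then only patches addresses and lengths.
 */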
1379static int _setup_cmd_template(struct qce_device *pce_dev)
1380{
1381 dmov_sg *pcmd;
1382 struct dmov_desc *pdesc;
1383 unsigned char *vaddr;
1384 int i = 0;
1385
 1386	/* Divide up the coherent memory (QCE_BUF_SIZE bytes) */
1387
1388 /* 1. ce_in channel 1st command src descriptors, 128 entries */
1389 vaddr = pce_dev->coh_vmem;
1390 vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
1391 pce_dev->ce_in_src_desc = (struct dmov_desc *) vaddr;
1392 pce_dev->phy_ce_in_src_desc = pce_dev->coh_pmem +
1393 (vaddr - pce_dev->coh_vmem);
1394 vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);
1395
 1396	/* 2. ce_in channel 1st command dst descriptors, 128 entries */
1397 vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
1398 pce_dev->ce_in_dst_desc = (struct dmov_desc *) vaddr;
1399 pce_dev->phy_ce_in_dst_desc = pce_dev->coh_pmem +
1400 (vaddr - pce_dev->coh_vmem);
1401 vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);
1402
1403 /* 3. ce_in channel command list of one scatter gather command */
1404 pce_dev->cmd_list_ce_in = vaddr;
1405 pce_dev->phy_cmd_list_ce_in = pce_dev->coh_pmem
1406 + (vaddr - pce_dev->coh_vmem);
1407 vaddr = vaddr + sizeof(dmov_sg);
1408
1409 /* 4. authentication result. */
1410 pce_dev->dig_result = vaddr;
1411 pce_dev->phy_dig_result = pce_dev->coh_pmem +
1412 (vaddr - pce_dev->coh_vmem);
1413 vaddr = vaddr + SHA256_DIGESTSIZE;
1414
1415 /* 5. ce_out channel command list of one scatter gather command */
1416 pce_dev->cmd_list_ce_out = vaddr;
1417 pce_dev->phy_cmd_list_ce_out = pce_dev->coh_pmem
1418 + (vaddr - pce_dev->coh_vmem);
1419 vaddr = vaddr + sizeof(dmov_sg);
1420
 1421	/* 6. ce_out channel command src descriptors, 128 entries */
1422 vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
1423 pce_dev->ce_out_src_desc = (struct dmov_desc *) vaddr;
1424 pce_dev->phy_ce_out_src_desc = pce_dev->coh_pmem
1425 + (vaddr - pce_dev->coh_vmem);
1426 vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);
1427
1428 /* 7. ce_out channel command dst descriptors, 128 entries. */
1429 vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
1430 pce_dev->ce_out_dst_desc = (struct dmov_desc *) vaddr;
1431 pce_dev->phy_ce_out_dst_desc = pce_dev->coh_pmem
1432 + (vaddr - pce_dev->coh_vmem);
1433 vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);
1434
1435 /* 8. pad area. */
1436 pce_dev->ce_pad = vaddr;
1437 pce_dev->phy_ce_pad = pce_dev->coh_pmem +
1438 (vaddr - pce_dev->coh_vmem);
1439
 1440	/* Pad area is two ADM blocks to cover the AES-CCM worst case */
1441 vaddr = vaddr + 2 * ADM_CE_BLOCK_SIZE;
1442
1443 /* 9. ce_in channel command pointer list. */
1444 vaddr = (unsigned char *) ALIGN(((unsigned int) vaddr), 8);
1445 pce_dev->cmd_pointer_list_ce_in = (unsigned int *) vaddr;
1446 pce_dev->phy_cmd_pointer_list_ce_in = pce_dev->coh_pmem +
1447 (vaddr - pce_dev->coh_vmem);
1448 vaddr = vaddr + sizeof(unsigned char *);
1449
 1450	/* 10. ce_out channel command pointer list. */
1451 vaddr = (unsigned char *) ALIGN(((unsigned int) vaddr), 8);
1452 pce_dev->cmd_pointer_list_ce_out = (unsigned int *) vaddr;
1453 pce_dev->phy_cmd_pointer_list_ce_out = pce_dev->coh_pmem +
1454 (vaddr - pce_dev->coh_vmem);
1455 vaddr = vaddr + sizeof(unsigned char *);
1456
1457 /* 11. throw away area to store by-pass data from ce_out. */
1458 pce_dev->ce_out_ignore = (unsigned char *) vaddr;
1459 pce_dev->phy_ce_out_ignore = pce_dev->coh_pmem
1460 + (vaddr - pce_dev->coh_vmem);
1461	pce_dev->ce_out_ignore_size = QCE_BUF_SIZE - (vaddr -
1462				pce_dev->coh_vmem); /* at least 1.5 K of space */
1463 /*
1464 * The first command of command list ce_in is for the input of
1465 * concurrent operation of encrypt/decrypt or for the input
1466 * of authentication.
1467 */
1468 pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in;
1469 /* swap byte and half word , dst crci , scatter gather */
1470 pcmd->cmd = CMD_DST_SWAP_BYTES | CMD_DST_SWAP_SHORTS |
1471 CMD_DST_CRCI(pce_dev->crci_in) | CMD_MODE_SG;
1472 pdesc = pce_dev->ce_in_src_desc;
1473 pdesc->addr = 0; /* to be filled in each operation */
1474 pdesc->len = 0; /* to be filled in each operation */
1475 pcmd->src_dscr = (unsigned) pce_dev->phy_ce_in_src_desc;
1476
1477 pdesc = pce_dev->ce_in_dst_desc;
1478 for (i = 0; i < QCE_MAX_NUM_DESC; i++) {
1479 pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase);
1480 pdesc->len = 0; /* to be filled in each operation */
1481 pdesc++;
1482 }
1483 pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_in_dst_desc;
1484 pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) |
1485 DST_INDEX_SG_CMD(0);
1486 pcmd++;
1487
1488 /* setup command pointer list */
1489 *(pce_dev->cmd_pointer_list_ce_in) = (CMD_PTR_LP | DMOV_CMD_LIST |
1490 DMOV_CMD_ADDR((unsigned int)
1491 pce_dev->phy_cmd_list_ce_in));
1492 pce_dev->chan_ce_in_cmd->user = (void *) pce_dev;
1493 pce_dev->chan_ce_in_cmd->exec_func = NULL;
1494 pce_dev->chan_ce_in_cmd->cmdptr = DMOV_CMD_ADDR(
1495 (unsigned int) pce_dev->phy_cmd_pointer_list_ce_in);
1496
1497 /*
1498 * The first command in the command list ce_out.
 1499	 * It is for encrypt/decrypt output.
1500 * If hashing only, ce_out is not used.
1501 */
1502 pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out;
1503 /* swap byte, half word, source crci, scatter gather */
1504 pcmd->cmd = CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
1505 CMD_SRC_CRCI(pce_dev->crci_out) | CMD_MODE_SG;
1506
1507 pdesc = pce_dev->ce_out_src_desc;
1508 for (i = 0; i < QCE_MAX_NUM_DESC; i++) {
1509 pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase);
1510 pdesc->len = 0; /* to be filled in each operation */
1511 pdesc++;
1512 }
1513 pcmd->src_dscr = (unsigned) pce_dev->phy_ce_out_src_desc;
1514
1515 pdesc = pce_dev->ce_out_dst_desc;
1516 pdesc->addr = 0; /* to be filled in each operation */
1517 pdesc->len = 0; /* to be filled in each operation */
1518 pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_out_dst_desc;
1519 pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) |
1520 DST_INDEX_SG_CMD(0);
1521
1522 pcmd++;
1523
1524 /* setup command pointer list */
1525 *(pce_dev->cmd_pointer_list_ce_out) = (CMD_PTR_LP | DMOV_CMD_LIST |
1526 DMOV_CMD_ADDR((unsigned int)pce_dev->
1527 phy_cmd_list_ce_out));
1528
1529 pce_dev->chan_ce_out_cmd->user = pce_dev;
1530 pce_dev->chan_ce_out_cmd->exec_func = NULL;
1531 pce_dev->chan_ce_out_cmd->cmdptr = DMOV_CMD_ADDR(
1532 (unsigned int) pce_dev->phy_cmd_pointer_list_ce_out);
1533
1534 return 0;
1535};
1536
1537static int _qce_start_dma(struct qce_device *pce_dev, bool ce_in, bool ce_out)
1538{
1539
1540 if (ce_in)
1541 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IN_PROG;
1542 else
1543 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
1544
1545 if (ce_out)
1546 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IN_PROG;
1547 else
1548 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
1549
1550 if (ce_in)
1551 msm_dmov_enqueue_cmd(pce_dev->chan_ce_in,
1552 pce_dev->chan_ce_in_cmd);
1553 if (ce_out)
1554 msm_dmov_enqueue_cmd(pce_dev->chan_ce_out,
1555 pce_dev->chan_ce_out_cmd);
1556
1557 return 0;
1558};
1559
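/*
 * AEAD (e.g. AES-CCM) request: everything moved through ADM must be a
 * multiple of ADM_CE_BLOCK_SIZE, so input and output are padded from the
 * ce_pad scratch area and the associated data on the output side is
 * discarded into ce_out_ignore.
 */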
1560int qce_aead_req(void *handle, struct qce_req *q_req)
1561{
1562 struct qce_device *pce_dev = (struct qce_device *) handle;
1563 struct aead_request *areq = (struct aead_request *) q_req->areq;
1564 uint32_t authsize = q_req->authsize;
1565 uint32_t totallen_in, totallen_out, out_len;
1566 uint32_t pad_len_in, pad_len_out;
1567 uint32_t pad_mac_len_out, pad_ptx_len_out;
1568 int rc = 0;
1569
1570 if (q_req->dir == QCE_ENCRYPT) {
1571 q_req->cryptlen = areq->cryptlen;
1572 totallen_in = q_req->cryptlen + areq->assoclen;
1573 totallen_out = q_req->cryptlen + authsize + areq->assoclen;
1574 out_len = areq->cryptlen + authsize;
1575 pad_len_in = ALIGN(totallen_in, ADM_CE_BLOCK_SIZE) -
1576 totallen_in;
1577 pad_mac_len_out = ALIGN(authsize, ADM_CE_BLOCK_SIZE) -
1578 authsize;
1579 pad_ptx_len_out = ALIGN(q_req->cryptlen, ADM_CE_BLOCK_SIZE) -
1580 q_req->cryptlen;
1581 pad_len_out = pad_ptx_len_out + pad_mac_len_out;
1582 totallen_out += pad_len_out;
1583 } else {
1584 q_req->cryptlen = areq->cryptlen - authsize;
1585 totallen_in = areq->cryptlen + areq->assoclen;
1586 totallen_out = q_req->cryptlen + areq->assoclen;
1587 out_len = areq->cryptlen - authsize;
1588 pad_len_in = ALIGN(areq->cryptlen, ADM_CE_BLOCK_SIZE) -
1589 areq->cryptlen;
1590 pad_len_out = pad_len_in + authsize;
1591 totallen_out += pad_len_out;
1592 }
1593
1594 _chain_buffer_in_init(pce_dev);
1595 _chain_buffer_out_init(pce_dev);
1596
1597 pce_dev->assoc_nents = 0;
1598 pce_dev->src_nents = 0;
1599 pce_dev->dst_nents = 0;
1600 pce_dev->ivsize = q_req->ivsize;
1601 pce_dev->authsize = q_req->authsize;
1602
1603 /* associated data input */
1604 pce_dev->assoc_nents = count_sg(areq->assoc, areq->assoclen);
1605 dma_map_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
1606 DMA_TO_DEVICE);
1607 if (_chain_sg_buffer_in(pce_dev, areq->assoc, areq->assoclen) < 0) {
1608 rc = -ENOMEM;
1609 goto bad;
1610 }
1611 /* cipher input */
1612 pce_dev->src_nents = count_sg(areq->src, areq->cryptlen);
1613 dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
1614 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
1615 DMA_TO_DEVICE);
1616 if (_chain_sg_buffer_in(pce_dev, areq->src, areq->cryptlen) < 0) {
1617 rc = -ENOMEM;
1618 goto bad;
1619 }
1620 /* pad data in */
1621 if (pad_len_in) {
1622 if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
1623 pad_len_in) < 0) {
1624 rc = -ENOMEM;
1625 goto bad;
1626 }
1627 }
1628
1629 /* ignore associated data */
1630 if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_out_ignore,
1631 areq->assoclen) < 0) {
1632 rc = -ENOMEM;
1633 goto bad;
1634 }
1635 /* cipher + mac output for encryption */
1636 if (areq->src != areq->dst) {
1637 pce_dev->dst_nents = count_sg(areq->dst, out_len);
1638 dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
1639 DMA_FROM_DEVICE);
1640 };
1641 if (_chain_sg_buffer_out(pce_dev, areq->dst, out_len) < 0) {
1642 rc = -ENOMEM;
1643 goto bad;
1644 }
1645 /* pad data out */
1646 if (pad_len_out) {
1647 if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
1648 pad_len_out) < 0) {
1649 rc = -ENOMEM;
1650 goto bad;
1651 }
1652 }
1653
1654 /* finalize the ce_in and ce_out channels command lists */
1655 _ce_in_final(pce_dev, ALIGN(totallen_in, ADM_CE_BLOCK_SIZE));
1656 _ce_out_final(pce_dev, ALIGN(totallen_out, ADM_CE_BLOCK_SIZE));
1657
1658 /* set up crypto device */
1659 rc = _ce_setup_cipher(pce_dev, q_req, totallen_in, areq->assoclen);
1660 if (rc < 0)
1661 goto bad;
1662
1663 /* setup for callback, and issue command to adm */
1664 pce_dev->areq = q_req->areq;
1665 pce_dev->qce_cb = q_req->qce_cb;
1666
1667 pce_dev->chan_ce_in_cmd->complete_func = _aead_ce_in_call_back;
1668 pce_dev->chan_ce_out_cmd->complete_func = _aead_ce_out_call_back;
1669
1670 _ce_in_dump(pce_dev);
1671 _ce_out_dump(pce_dev);
1672
1673 rc = _qce_start_dma(pce_dev, true, true);
1674 if (rc == 0)
1675 return 0;
1676bad:
1677 if (pce_dev->assoc_nents) {
1678 dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
1679 DMA_TO_DEVICE);
1680 }
1681
1682 if (pce_dev->src_nents) {
1683 dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
1684 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
1685 DMA_TO_DEVICE);
1686 }
1687 if (pce_dev->dst_nents) {
1688 dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
1689 DMA_FROM_DEVICE);
1690 }
1691 return rc;
1692}
1693EXPORT_SYMBOL(qce_aead_req);
1694
1695int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
1696{
1697 int rc = 0;
1698 struct qce_device *pce_dev = (struct qce_device *) handle;
1699 struct ablkcipher_request *areq = (struct ablkcipher_request *)
1700 c_req->areq;
1701
1702 uint32_t pad_len = ALIGN(areq->nbytes, ADM_CE_BLOCK_SIZE)
1703 - areq->nbytes;
1704
1705 _chain_buffer_in_init(pce_dev);
1706 _chain_buffer_out_init(pce_dev);
1707
1708 pce_dev->src_nents = 0;
1709 pce_dev->dst_nents = 0;
1710
1711 /* cipher input */
1712 pce_dev->src_nents = count_sg(areq->src, areq->nbytes);
1713
1714 if (c_req->use_pmem != 1)
1715 dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
1716 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
1717 DMA_TO_DEVICE);
1718 else
1719 dma_map_pmem_sg(&c_req->pmem->src[0], pce_dev->src_nents,
1720 areq->src);
1721
1722 if (_chain_sg_buffer_in(pce_dev, areq->src, areq->nbytes) < 0) {
1723 rc = -ENOMEM;
1724 goto bad;
1725 }
1726
1727 /* cipher output */
1728 if (areq->src != areq->dst) {
1729 pce_dev->dst_nents = count_sg(areq->dst, areq->nbytes);
1730 if (c_req->use_pmem != 1)
1731 dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
1732 DMA_FROM_DEVICE);
1733 else
1734 dma_map_pmem_sg(&c_req->pmem->dst[0],
1735 pce_dev->dst_nents, areq->dst);
1736 };
1737 if (_chain_sg_buffer_out(pce_dev, areq->dst, areq->nbytes) < 0) {
1738 rc = -ENOMEM;
1739 goto bad;
1740 }
1741
1742 /* pad data */
1743 if (pad_len) {
1744 if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
1745 pad_len) < 0) {
1746 rc = -ENOMEM;
1747 goto bad;
1748 }
1749 if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
1750 pad_len) < 0) {
1751 rc = -ENOMEM;
1752 goto bad;
1753 }
1754 }
1755
1756 /* finalize the ce_in and ce_out channels command lists */
1757	_ce_in_final(pce_dev, 0);
1758 _ce_out_final(pce_dev, 0);
1759
1760 _ce_in_dump(pce_dev);
1761 _ce_out_dump(pce_dev);
1762
1763 /* set up crypto device */
1764 rc = _ce_setup_cipher(pce_dev, c_req, areq->nbytes, 0);
1765 if (rc < 0)
1766 goto bad;
1767
1768 /* setup for callback, and issue command to adm */
1769 pce_dev->areq = areq;
1770 pce_dev->qce_cb = c_req->qce_cb;
1771 if (c_req->use_pmem == 1) {
1772 pce_dev->chan_ce_in_cmd->complete_func =
1773 _ablk_cipher_ce_in_call_back_pmem;
1774 pce_dev->chan_ce_out_cmd->complete_func =
1775 _ablk_cipher_ce_out_call_back_pmem;
1776 } else {
1777 pce_dev->chan_ce_in_cmd->complete_func =
1778 _ablk_cipher_ce_in_call_back;
1779 pce_dev->chan_ce_out_cmd->complete_func =
1780 _ablk_cipher_ce_out_call_back;
1781 }
1782 rc = _qce_start_dma(pce_dev, true, true);
1783
1784 if (rc == 0)
1785 return 0;
1786bad:
1787 if (c_req->use_pmem != 1) {
1788 if (pce_dev->dst_nents) {
1789 dma_unmap_sg(pce_dev->pdev, areq->dst,
1790 pce_dev->dst_nents, DMA_FROM_DEVICE);
1791 }
1792 if (pce_dev->src_nents) {
1793 dma_unmap_sg(pce_dev->pdev, areq->src,
1794 pce_dev->src_nents,
1795 (areq->src == areq->dst) ?
1796 DMA_BIDIRECTIONAL :
1797 DMA_TO_DEVICE);
1798 }
1799 }
1800 return rc;
1801}
1802EXPORT_SYMBOL(qce_ablk_cipher_req);
1803
1804int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
1805{
1806 struct qce_device *pce_dev = (struct qce_device *) handle;
1807 int rc;
1808 uint32_t pad_len = ALIGN(sreq->size, ADM_CE_BLOCK_SIZE) - sreq->size;
1809 struct ahash_request *areq = (struct ahash_request *)sreq->areq;
1810
1811 _chain_buffer_in_init(pce_dev);
1812 pce_dev->src_nents = count_sg(sreq->src, sreq->size);
1813 dma_map_sg(pce_dev->pdev, sreq->src, pce_dev->src_nents,
1814 DMA_TO_DEVICE);
1815
1816 if (_chain_sg_buffer_in(pce_dev, sreq->src, sreq->size) < 0) {
1817 rc = -ENOMEM;
1818 goto bad;
1819 }
1820
1821 if (pad_len) {
1822 if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
1823 pad_len) < 0) {
1824 rc = -ENOMEM;
1825 goto bad;
1826 }
1827 }
1828	_ce_in_final(pce_dev, 0);
1829
1830 _ce_in_dump(pce_dev);
1831
1832 rc = _ce_setup_hash(pce_dev, sreq);
1833
1834 if (rc < 0)
1835 goto bad;
1836
1837 pce_dev->areq = areq;
1838 pce_dev->qce_cb = sreq->qce_cb;
1839 pce_dev->chan_ce_in_cmd->complete_func = _sha_ce_in_call_back;
1840
1841 rc = _qce_start_dma(pce_dev, true, false);
1842
1843 if (rc == 0)
1844 return 0;
1845bad:
1846 if (pce_dev->src_nents) {
1847 dma_unmap_sg(pce_dev->pdev, sreq->src,
1848 pce_dev->src_nents, DMA_TO_DEVICE);
1849 }
1850
1851 return rc;
1852}
1853EXPORT_SYMBOL(qce_process_sha_req);
1854
1855/* crypto engine open function. */
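/*
 * A minimal, illustrative call sequence (a sketch only, not part of this
 * driver; the request setup shown as "..." is left to the caller):
 *
 *	int rc;
 *	void *ce = qce_open(pdev, &rc);
 *	if (ce == NULL)
 *		return rc;
 *	struct qce_sha_req sreq = { ... };   <-- alg, src, size, areq, qce_cb
 *	rc = qce_process_sha_req(ce, &sreq); <-- digest delivered via qce_cb
 *	qce_close(ce);
 */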
1856void *qce_open(struct platform_device *pdev, int *rc)
1857{
1858 struct qce_device *pce_dev;
1859 struct resource *resource;
1860 struct clk *ce_core_clk;
1861 struct clk *ce_clk;
1862
1863 pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
1864 if (!pce_dev) {
1865 *rc = -ENOMEM;
1866 dev_err(&pdev->dev, "Can not allocate memory\n");
1867 return NULL;
1868 }
1869 pce_dev->pdev = &pdev->dev;
1870
1871 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1872 if (!resource) {
1873 *rc = -ENXIO;
1874 dev_err(pce_dev->pdev, "Missing MEM resource\n");
1875 goto err_pce_dev;
1876 };
1877 pce_dev->phy_iobase = resource->start;
1878 pce_dev->iobase = ioremap_nocache(resource->start,
1879 resource->end - resource->start + 1);
1880 if (!pce_dev->iobase) {
1881 *rc = -ENOMEM;
1882 dev_err(pce_dev->pdev, "Can not map io memory\n");
1883 goto err_pce_dev;
1884 }
1885
1886 pce_dev->chan_ce_in_cmd = kzalloc(sizeof(struct msm_dmov_cmd),
1887 GFP_KERNEL);
1888 pce_dev->chan_ce_out_cmd = kzalloc(sizeof(struct msm_dmov_cmd),
1889 GFP_KERNEL);
1890 if (pce_dev->chan_ce_in_cmd == NULL ||
1891 pce_dev->chan_ce_out_cmd == NULL) {
1892 dev_err(pce_dev->pdev, "Can not allocate memory\n");
1893 *rc = -ENOMEM;
1894 goto err_dm_chan_cmd;
1895 }
1896
1897 resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
1898 "crypto_channels");
1899 if (!resource) {
1900 *rc = -ENXIO;
1901 dev_err(pce_dev->pdev, "Missing DMA channel resource\n");
1902 goto err_dm_chan_cmd;
1903 };
1904 pce_dev->chan_ce_in = resource->start;
1905 pce_dev->chan_ce_out = resource->end;
1906 resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
1907 "crypto_crci_in");
1908 if (!resource) {
1909 *rc = -ENXIO;
1910 dev_err(pce_dev->pdev, "Missing DMA crci in resource\n");
1911 goto err_dm_chan_cmd;
1912 };
1913 pce_dev->crci_in = resource->start;
1914 resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
1915 "crypto_crci_out");
1916 if (!resource) {
1917 *rc = -ENXIO;
1918 dev_err(pce_dev->pdev, "Missing DMA crci out resource\n");
1919 goto err_dm_chan_cmd;
1920 };
1921 pce_dev->crci_out = resource->start;
1922
1923 pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
 1924			QCE_BUF_SIZE, &pce_dev->coh_pmem, GFP_KERNEL);
1925
1926 if (pce_dev->coh_vmem == NULL) {
1927 *rc = -ENOMEM;
1928 dev_err(pce_dev->pdev, "Can not allocate coherent memory.\n");
1929 goto err;
1930 }
1931
1932 /* Get CE core clk */
1933	ce_core_clk = clk_get(pce_dev->pdev, "core_clk");
1934	if (IS_ERR(ce_core_clk)) {
1935 *rc = PTR_ERR(ce_core_clk);
1936 goto err;
1937 }
1938 pce_dev->ce_core_clk = ce_core_clk;
1939 /* Get CE clk */
1940	ce_clk = clk_get(pce_dev->pdev, "iface_clk");
1941	if (IS_ERR(ce_clk)) {
1942 *rc = PTR_ERR(ce_clk);
1943 clk_put(pce_dev->ce_core_clk);
1944 goto err;
1945 }
1946 pce_dev->ce_clk = ce_clk;
1947
1948 /* Enable CE core clk */
1949 *rc = clk_enable(pce_dev->ce_core_clk);
1950 if (*rc) {
1951 clk_put(pce_dev->ce_core_clk);
1952 clk_put(pce_dev->ce_clk);
1953 goto err;
1954 } else {
1955 /* Enable CE clk */
1956 *rc = clk_enable(pce_dev->ce_clk);
1957 if (*rc) {
1958 clk_disable(pce_dev->ce_core_clk);
1959 clk_put(pce_dev->ce_core_clk);
1960 clk_put(pce_dev->ce_clk);
1961 goto err;
1962
1963 }
1964 }
1965 _setup_cmd_template(pce_dev);
1966
1967 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1968 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1969
1970 if (_init_ce_engine(pce_dev)) {
1971 *rc = -ENXIO;
1972 goto err;
1973 }
1974 *rc = 0;
1975 return pce_dev;
1976
1977err:
1978 if (pce_dev->coh_vmem)
1979		dma_free_coherent(pce_dev->pdev, QCE_BUF_SIZE,
1980 pce_dev->coh_vmem, pce_dev->coh_pmem);
1981err_dm_chan_cmd:
1982 kfree(pce_dev->chan_ce_in_cmd);
1983 kfree(pce_dev->chan_ce_out_cmd);
1984 if (pce_dev->iobase)
1985 iounmap(pce_dev->iobase);
1986
1987err_pce_dev:
1988
1989 kfree(pce_dev);
1990
1991 return NULL;
1992}
1993EXPORT_SYMBOL(qce_open);
1994
1995/* crypto engine close function. */
1996int qce_close(void *handle)
1997{
1998 struct qce_device *pce_dev = (struct qce_device *) handle;
1999
2000 if (handle == NULL)
2001 return -ENODEV;
2002 if (pce_dev->iobase)
2003 iounmap(pce_dev->iobase);
2004
2005 if (pce_dev->coh_vmem)
 2006		dma_free_coherent(pce_dev->pdev, QCE_BUF_SIZE, pce_dev->coh_vmem,
2007 pce_dev->coh_pmem);
2008 clk_disable(pce_dev->ce_clk);
2009 clk_disable(pce_dev->ce_core_clk);
2010
2011 clk_put(pce_dev->ce_clk);
2012 clk_put(pce_dev->ce_core_clk);
2013
2014 kfree(pce_dev->chan_ce_in_cmd);
2015 kfree(pce_dev->chan_ce_out_cmd);
2016 kfree(handle);
2017
2018 return 0;
2019}
2020EXPORT_SYMBOL(qce_close);
2021
2022int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
2023{
2024 if (ce_support == NULL)
2025 return -EINVAL;
2026
2027 ce_support->sha1_hmac_20 = false;
2028 ce_support->sha1_hmac = false;
2029 ce_support->sha256_hmac = false;
2030 ce_support->sha_hmac = false;
2031 ce_support->cmac = true;
2032 ce_support->aes_key_192 = false;
2033 ce_support->aes_xts = true;
2034 ce_support->aes_ccm = true;
2035 ce_support->ota = false;
2036 return 0;
2037}
2038EXPORT_SYMBOL(qce_hw_support);
2039
2040MODULE_LICENSE("GPL v2");
2041MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
2042MODULE_DESCRIPTION("Crypto Engine driver");
2043MODULE_VERSION("2.07");