/* Qualcomm Crypto Engine driver.
 *
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
14#include <linux/types.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/mod_devicetable.h>
18#include <linux/device.h>
19#include <linux/clk.h>
20#include <linux/err.h>
21#include <linux/dma-mapping.h>
22#include <linux/io.h>
23#include <linux/platform_device.h>
24#include <linux/spinlock.h>
25#include <linux/delay.h>
26#include <linux/crypto.h>
27#include <crypto/hash.h>
28#include <crypto/sha.h>
29
30#include <linux/qcota.h>
31#include <mach/dma.h>
32
33#include "inc/qce.h"
34#include "inc/qcedev.h"
35#include "inc/qcryptohw_30.h"
36#include "inc/qce_ota.h"
37
38/* ADM definitions */
39#define LI_SG_CMD (1 << 31) /* last index in the scatter gather cmd */
40#define SRC_INDEX_SG_CMD(index) ((index & 0x3fff) << 16)
41#define DST_INDEX_SG_CMD(index) (index & 0x3fff)
42#define ADM_DESC_LAST (1 << 31)
43
44/* Data xfer between DM and CE in blocks of 16 bytes */
45#define ADM_CE_BLOCK_SIZE 16
46
47/* Data xfer between DM and CE in blocks of 64 bytes */
48#define ADM_SHA_BLOCK_SIZE 64
49
50#define ADM_DESC_LENGTH_MASK 0xffff
51#define ADM_DESC_LENGTH(x) (x & ADM_DESC_LENGTH_MASK)
52
/*
 * One ADM (application data mover) scatter-gather descriptor: physical
 * address and length of a single contiguous DMA segment. The low 16 bits
 * of @len are the byte count (ADM_DESC_LENGTH_MASK); the top bit may
 * carry the ADM_DESC_LAST flag.
 */
struct dmov_desc {
	uint32_t addr;
	uint32_t len;
};
57
58#define ADM_STATUS_OK 0x80000002
59
60/* Misc definitions */
61
62/* QCE max number of descriptor in a descriptor list */
63#define QCE_MAX_NUM_DESC 128
64
/* State of DM channel */
enum qce_chan_st_enum {
	QCE_CHAN_STATE_IDLE = 0,	/* no transfer in flight */
	QCE_CHAN_STATE_IN_PROG = 1,	/* DMA command issued, not yet done */
	QCE_CHAN_STATE_COMP = 2,	/* transfer completed */
	QCE_CHAN_STATE_LAST
};
72
/*
 * CE HW device structure.
 * Each engine has an instance of the structure.
 * Each engine can only handle one crypto operation at one time. It is up to
 * the sw above to ensure single threading of operation on an engine.
 */
struct qce_device {
	struct device *pdev;		/* Handle to platform_device structure */
	unsigned char *coh_vmem;	/* Allocated coherent virtual memory */
	dma_addr_t coh_pmem;		/* Allocated coherent physical memory */
	void __iomem *iobase;		/* Virtual io base of CE HW */
	unsigned int phy_iobase;	/* Physical io base of CE HW */
	struct clk *ce_clk;		/* Handle to CE clk */
	unsigned int crci_in;		/* CRCI for CE DM IN Channel */
	unsigned int crci_out;		/* CRCI for CE DM OUT Channel */
	unsigned int crci_hash;		/* CRCI for CE HASH */
	unsigned int chan_ce_in;	/* ADM channel used for CE input
					 * and auth result if authentication
					 * only operation. */
	unsigned int chan_ce_out;	/* ADM channel used for CE output,
					   and icv for esp */

	/* ADM command-pointer lists for the in/out channels
	 * (virtual address + DMA/physical address of each). */
	unsigned int *cmd_pointer_list_ce_in;
	dma_addr_t  phy_cmd_pointer_list_ce_in;

	unsigned int *cmd_pointer_list_ce_out;
	dma_addr_t  phy_cmd_pointer_list_ce_out;

	/* ADM command lists (dmov_sg/dmov_s commands) for in/out. */
	unsigned char *cmd_list_ce_in;
	dma_addr_t  phy_cmd_list_ce_in;

	unsigned char *cmd_list_ce_out;
	dma_addr_t  phy_cmd_list_ce_out;

	/* Scatter-gather descriptor arrays for each direction. */
	struct dmov_desc *ce_out_src_desc;
	dma_addr_t  phy_ce_out_src_desc;

	struct dmov_desc *ce_out_dst_desc;
	dma_addr_t  phy_ce_out_dst_desc;

	struct dmov_desc *ce_in_src_desc;
	dma_addr_t  phy_ce_in_src_desc;

	struct dmov_desc *ce_in_dst_desc;
	dma_addr_t  phy_ce_in_dst_desc;

	/* Scratch buffer for CE output bytes the caller does not want. */
	unsigned char *ce_out_ignore;
	dma_addr_t phy_ce_out_ignore;

	/* Padding buffer used to round transfers up to CE block size. */
	unsigned char *ce_pad;
	dma_addr_t phy_ce_pad;

	struct msm_dmov_cmd *chan_ce_in_cmd;
	struct msm_dmov_cmd *chan_ce_out_cmd;

	uint32_t ce_out_ignore_size;

	/* Index of the descriptor currently being filled in each array. */
	int ce_out_dst_desc_index;
	int ce_in_src_desc_index;

	enum qce_chan_st_enum chan_ce_in_state;		/* chan ce_in state */
	enum qce_chan_st_enum chan_ce_out_state;	/* chan ce_out state */

	int chan_ce_in_status;		/* chan ce_in status      */
	int chan_ce_out_status;		/* chan ce_out status */

	/* Digest result buffer (virtual + DMA address). */
	unsigned char *dig_result;
	dma_addr_t phy_dig_result;

	/* cached aes key */
	uint32_t aeskey[AES256_KEY_SIZE/sizeof(uint32_t)];

	uint32_t aes_key_size;		/* cached aes key size in bytes */
	int fastaes;			/* ce supports fast aes */
	int hmac;			/* ce support hmac-sha1 */
	bool ota;			/* ce support ota */

	qce_comp_func_ptr_t qce_cb;	/* qce callback function pointer */

	/* nents of the scatterlists mapped for the current request. */
	int assoc_nents;
	int src_nents;
	int dst_nents;

	void *areq;			/* current request (aead/ahash/ablk) */
	enum qce_cipher_mode_enum mode;	/* cipher mode of current request */

	/* DMA addresses of single-mapped buffers for the current request. */
	dma_addr_t phy_iv_in;
	dma_addr_t phy_ota_src;
	dma_addr_t phy_ota_dst;
	unsigned int ota_size;
	int err;			/* count of CE hw errors observed */
};
167
/* Standard initialization vector for SHA-1, source: FIPS 180-2 (5 words) */
static uint32_t _std_init_vector_sha1[] = {
	0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
};
/* Standard initialization vector for SHA-256, source: FIPS 180-2 (8 words) */
static uint32_t _std_init_vector_sha256[] = {
	0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
	0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
};
177
/* Source: FIPS 197, Figure 7. S-box: substitution values for the byte xy */
static const uint32_t _s_box[256] = {
	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,

	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,

	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,

	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,

	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,

	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,

	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,

	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,

	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,

	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,

	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,

	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,

	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,

	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,

	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,

	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 };


/* SubWord (FIPS 197, Sec 5.2): apply the S-box to each byte of a word. */
static uint32_t _sub_word(uint32_t w)
{
	return (_s_box[(w >> 24) & 0xff] << 24) |
		(_s_box[(w >> 16) & 0xff] << 16) |
		(_s_box[(w >> 8) & 0xff] << 8) |
		_s_box[w & 0xff];
}

/*
 * Expand an AES key into the full round-key schedule.
 * Source: FIPS 197, Sec 5.2 Key Expansion, Figure 11. Pseudo Code for Key
 * Expansion.
 *
 * @keysize:     key size in bits (128, 192 or 256; anything else is
 *               treated as 128, matching the historical behavior)
 * @AES_KEY:     input key, Nk big-endian words
 * @AES_RND_KEY: output schedule, 4*(Nr+1) words (44/52/60 words)
 *
 * The round constant Rcon is stepped through GF(2^8) doubling
 * (xtime: <<1, reduced by 0x1b on overflow), which yields exactly the
 * sequence 0x01, 0x02, ... 0x80, 0x1b, 0x36 that the previous
 * per-index lookup tables hard-coded.
 */
static void _aes_expand_key_schedule(uint32_t keysize, uint32_t *AES_KEY,
		uint32_t *AES_RND_KEY)
{
	uint32_t i;
	uint32_t Nk;
	uint32_t Nr;
	uint32_t temp;
	uint32_t rcon = 0x01;	/* round constant byte, stepped via xtime */

	switch (keysize) {
	case 192:
		Nk = 6;
		Nr = 12;
		break;

	case 256:
		Nk = 8;
		Nr = 14;
		break;

	case 128:
	default: /* default to AES128 */
		Nk = 4;
		Nr = 10;
		break;
	}

	/* the first Nk words are the key itself */
	for (i = 0; i < Nk; i++)
		AES_RND_KEY[i] = AES_KEY[i];

	/* key expansion */
	for (i = Nk; i < (4 * (Nr + 1)); i++) {
		temp = AES_RND_KEY[i - 1];
		if ((i % Nk) == 0) {
			/* RotWord, SubWord, then xor in Rcon */
			temp = _sub_word((temp << 8) | (temp >> 24)) ^
					(rcon << 24);
			/* xtime step in GF(2^8) for the next Rcon */
			rcon = ((rcon << 1) ^ ((rcon & 0x80) ? 0x1b : 0)) &
					0xff;
		} else if ((Nk > 6) && ((i % Nk) == 4)) {
			/* extra SubWord step for AES-256 */
			temp = _sub_word(temp);
		}
		AES_RND_KEY[i] = AES_RND_KEY[i - Nk] ^ temp;
	}
}
415
/*
 * Pack a byte stream into big-endian (network order) 32-bit words.
 * A trailing partial word (len not a multiple of 4) is left-justified
 * into the high bytes of one final word; the unused low bytes are 0.
 */
static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned int full = len / sizeof(uint32_t);
	unsigned int rem = len % sizeof(uint32_t);
	unsigned int w;

	for (w = 0; w < full; w++) {
		iv[w] = ((uint32_t)b[0] << 24) |
			((uint32_t)b[1] << 16) |
			((uint32_t)b[2] << 8) |
			b[3];
		b += sizeof(uint32_t);
	}

	if (rem) {
		uint32_t last = 0;
		unsigned int shift = 24;

		while (rem--) {
			last |= (uint32_t)*b++ << shift;
			shift -= 8;
		}
		iv[full] = last;
	}
}
443
/*
 * Unpack big-endian (network order) 32-bit words into a byte stream.
 * For a trailing partial word only the top (len % 4) bytes are emitted.
 */
static void _net_words_to_byte_stream(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned int full = len / sizeof(uint32_t);
	unsigned int rem = len % sizeof(uint32_t);
	unsigned int k;

	for (k = 0; k < full; k++) {
		uint32_t w = iv[k];

		*b++ = (unsigned char)(w >> 24);
		*b++ = (unsigned char)(w >> 16);
		*b++ = (unsigned char)(w >> 8);
		*b++ = (unsigned char)w;
	}

	for (k = 0; k < rem; k++)
		*b++ = (unsigned char)(iv[full] >> (24 - 8 * k));
}
468
469static int count_sg(struct scatterlist *sg, int nbytes)
470{
471 int i;
472
473 for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
474 nbytes -= sg->length;
475 return i;
476}
477
478static int dma_map_pmem_sg(struct buf_info *pmem, unsigned entries,
479 struct scatterlist *sg)
480{
481 int i = 0;
482 for (i = 0; i < entries; i++) {
483
484 sg->dma_address = (dma_addr_t)pmem->offset;
485 sg++;
486 pmem++;
487 }
488 return 0;
489}
490
/*
 * Identify the crypto engine by reading its status register, then cache
 * its capability flags (fast AES, HMAC-SHA1, OTA F8/F9) from the
 * engines-available register onto @pce_dev.
 *
 * Returns 0 when a supported core (rev 0, 1 or 2) is found, -EIO otherwise.
 */
static int _probe_ce_engine(struct qce_device *pce_dev)
{
	unsigned int val;
	unsigned int rev;
	unsigned int eng_availability;	/* engine available functions */

	/* low 28 bits of the status register hold a fixed signature
	 * plus the core revision */
	val = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if ((val & 0xfffffff) != 0x0200004) {
		dev_err(pce_dev->pdev,
				"unknown Qualcomm crypto device at 0x%x 0x%x\n",
				pce_dev->phy_iobase, val);
		return -EIO;
	};
	rev = (val & CRYPTO_CORE_REV_MASK) >> CRYPTO_CORE_REV;
	if (rev == 0x2) {
		dev_info(pce_dev->pdev,
				"Qualcomm Crypto 3e device found at 0x%x\n",
				pce_dev->phy_iobase);
	} else if (rev == 0x1) {
		dev_info(pce_dev->pdev,
				"Qualcomm Crypto 3 device found at 0x%x\n",
				pce_dev->phy_iobase);
	} else if (rev == 0x0) {
		dev_info(pce_dev->pdev,
				"Qualcomm Crypto 2 device found at 0x%x\n",
				pce_dev->phy_iobase);
	} else {
		dev_err(pce_dev->pdev,
				"unknown Qualcomm crypto device at 0x%x\n",
				pce_dev->phy_iobase);
		return -EIO;
	}

	eng_availability = readl_relaxed(pce_dev->iobase +
			CRYPTO_ENGINES_AVAIL);

	/* fast-AES vs shared/slow AES engine */
	if (((eng_availability & CRYPTO_AES_SEL_MASK) >> CRYPTO_AES_SEL)
			== CRYPTO_AES_SEL_FAST)
		pce_dev->fastaes = 1;
	else
		pce_dev->fastaes = 0;

	if (eng_availability & (1 << CRYPTO_HMAC_SEL))
		pce_dev->hmac = 1;
	else
		pce_dev->hmac = 0;

	/* OTA support requires both the F9 (integrity) and F8
	 * (ciphering) engines */
	if ((eng_availability & (1 << CRYPTO_F9_SEL)) &&
			(eng_availability & (1 << CRYPTO_F8_SEL)))
		pce_dev->ota = true;
	else
		pce_dev->ota = false;

	/* invalidate the cached AES key */
	pce_dev->aes_key_size = 0;

	return 0;
};
548
/*
 * Soft-reset the crypto engine, program its interrupt masks, and probe
 * for a supported core. Returns 0 on success, -EIO if the engine is not
 * recognized or the config register does not read back as written.
 */
static int _init_ce_engine(struct qce_device *pce_dev)
{
	unsigned int val;

	/* reset qce */
	writel_relaxed(1 << CRYPTO_SW_RST, pce_dev->iobase + CRYPTO_CONFIG_REG);

	/* Ensure previous instruction (write to reset bit)
	 * was completed.
	 */
	mb();
	/* configure ce: mask DOUT/DIN/auth-done/error interrupts */
	val = (1 << CRYPTO_MASK_DOUT_INTR) | (1 << CRYPTO_MASK_DIN_INTR) |
			(1 << CRYPTO_MASK_AUTH_DONE_INTR) |
			(1 << CRYPTO_MASK_ERR_INTR);
	writel_relaxed(val, pce_dev->iobase + CRYPTO_CONFIG_REG);

	if (_probe_ce_engine(pce_dev) < 0)
		return -EIO;
	/* read back the config register to confirm the engine took it */
	if (readl_relaxed(pce_dev->iobase + CRYPTO_CONFIG_REG) != val) {
		dev_err(pce_dev->pdev,
				"unknown Qualcomm crypto device at 0x%x\n",
				pce_dev->phy_iobase);
		return -EIO;
	};
	return 0;
};
576
577static int _sha_ce_setup(struct qce_device *pce_dev, struct qce_sha_req *sreq)
578{
579 uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
580 uint32_t diglen;
581 int rc;
582 int i;
583 uint32_t cfg = 0;
584
585 /* if not the last, the size has to be on the block boundary */
586 if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
587 return -EIO;
588
589 switch (sreq->alg) {
590 case QCE_HASH_SHA1:
591 diglen = SHA1_DIGEST_SIZE;
592 break;
593 case QCE_HASH_SHA256:
594 diglen = SHA256_DIGEST_SIZE;
595 break;
596 default:
597 return -EINVAL;
598 }
599 /*
600 * write 20/32 bytes, 5/8 words into auth_iv
601 * for SHA1/SHA256
602 */
603
604 if (sreq->first_blk) {
605 if (sreq->alg == QCE_HASH_SHA1) {
606 for (i = 0; i < 5; i++)
607 auth32[i] = _std_init_vector_sha1[i];
608 } else {
609 for (i = 0; i < 8; i++)
610 auth32[i] = _std_init_vector_sha256[i];
611 }
612 } else
613 _byte_stream_to_net_words(auth32, sreq->digest, diglen);
614
615 rc = clk_enable(pce_dev->ce_clk);
616 if (rc)
617 return rc;
618
619 writel_relaxed(auth32[0], pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
620 writel_relaxed(auth32[1], pce_dev->iobase + CRYPTO_AUTH_IV1_REG);
621 writel_relaxed(auth32[2], pce_dev->iobase + CRYPTO_AUTH_IV2_REG);
622 writel_relaxed(auth32[3], pce_dev->iobase + CRYPTO_AUTH_IV3_REG);
623 writel_relaxed(auth32[4], pce_dev->iobase + CRYPTO_AUTH_IV4_REG);
624
625 if (sreq->alg == QCE_HASH_SHA256) {
626 writel_relaxed(auth32[5], pce_dev->iobase +
627 CRYPTO_AUTH_IV5_REG);
628 writel_relaxed(auth32[6], pce_dev->iobase +
629 CRYPTO_AUTH_IV6_REG);
630 writel_relaxed(auth32[7], pce_dev->iobase +
631 CRYPTO_AUTH_IV7_REG);
632 }
633 /* write auth_bytecnt 0/1, start with 0 */
634 writel_relaxed(sreq->auth_data[0], pce_dev->iobase +
635 CRYPTO_AUTH_BYTECNT0_REG);
636 writel_relaxed(sreq->auth_data[1], pce_dev->iobase +
637 CRYPTO_AUTH_BYTECNT1_REG);
638
639 /* write auth_seg_cfg */
640 writel_relaxed(sreq->size << CRYPTO_AUTH_SEG_SIZE,
641 pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
642
643 /*
644 * write seg_cfg
645 */
646
647 if (sreq->alg == QCE_HASH_SHA1)
648 cfg |= (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE);
649 else
650 cfg = (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE);
651
652 if (sreq->first_blk)
653 cfg |= 1 << CRYPTO_FIRST;
654 if (sreq->last_blk)
655 cfg |= 1 << CRYPTO_LAST;
656 cfg |= CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG;
657 writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);
658
659 /* write seg_size */
660 writel_relaxed(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
661
662 /* issue go to crypto */
663 writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
664 /* Ensure previous instructions (setting the GO register)
665 * was completed before issuing a DMA transfer request
666 */
667 mb();
668
669 return 0;
670}
671
/*
 * Program the crypto engine for one cipher (or AEAD) operation and kick
 * it off (GO): optional HMAC-SHA1 auth setup, IV and key registers for
 * DES/3DES/AES, cipher mode, segment config/sizes. For AES it caches
 * the key (and, without the fast-AES engine, the software-expanded
 * round keys) to skip rewriting an unchanged key.
 *
 * @totallen: total segment size in bytes; @coffset: cipher offset into it.
 * Returns 0 on success or the clk_enable() error code.
 */
static int _ce_setup(struct qce_device *pce_dev, struct qce_req *q_req,
		uint32_t totallen, uint32_t coffset)
{
	uint32_t hmackey[HMAC_KEY_SIZE/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0};
	uint32_t enckey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0, 0, 0, 0};
	uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
			0, 0, 0, 0};
	uint32_t enck_size_in_word = q_req->encklen / sizeof(uint32_t);
	int aes_key_chg;
	int i, rc;
	uint32_t aes_round_key[CRYPTO_AES_RNDKEYS];
	uint32_t cfg;
	uint32_t ivsize = q_req->ivsize;

	rc = clk_enable(pce_dev->ce_clk);
	if (rc)
		return rc;

	cfg = (1 << CRYPTO_FIRST) | (1 << CRYPTO_LAST);
	if (q_req->op == QCE_REQ_AEAD) {

		/* do authentication setup */

		cfg |= (CRYPTO_AUTH_SIZE_HMAC_SHA1 << CRYPTO_AUTH_SIZE)|
				(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG);

		/* write sha1 init vector */
		writel_relaxed(_std_init_vector_sha1[0],
				pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
		writel_relaxed(_std_init_vector_sha1[1],
				pce_dev->iobase + CRYPTO_AUTH_IV1_REG);
		writel_relaxed(_std_init_vector_sha1[2],
				pce_dev->iobase + CRYPTO_AUTH_IV2_REG);
		writel_relaxed(_std_init_vector_sha1[3],
				pce_dev->iobase + CRYPTO_AUTH_IV3_REG);
		writel_relaxed(_std_init_vector_sha1[4],
				pce_dev->iobase + CRYPTO_AUTH_IV4_REG);
		/* write hmac key (into auth IV registers 5..9) */
		_byte_stream_to_net_words(hmackey, q_req->authkey,
				q_req->authklen);
		writel_relaxed(hmackey[0], pce_dev->iobase +
				CRYPTO_AUTH_IV5_REG);
		writel_relaxed(hmackey[1], pce_dev->iobase +
				CRYPTO_AUTH_IV6_REG);
		writel_relaxed(hmackey[2], pce_dev->iobase +
				CRYPTO_AUTH_IV7_REG);
		writel_relaxed(hmackey[3], pce_dev->iobase +
				CRYPTO_AUTH_IV8_REG);
		writel_relaxed(hmackey[4], pce_dev->iobase +
				CRYPTO_AUTH_IV9_REG);
		/* auth byte counters start at zero for AEAD */
		writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG);
		writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT1_REG);

		/* write auth_seg_cfg */
		writel_relaxed((totallen << CRYPTO_AUTH_SEG_SIZE) & 0xffff0000,
				pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);

	}

	_byte_stream_to_net_words(enckey32, q_req->enckey, q_req->encklen);

	switch (q_req->mode) {
	case QCE_MODE_ECB:
		cfg |= (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
		break;

	case QCE_MODE_CBC:
		cfg |= (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
		break;

	case QCE_MODE_CTR:
	default:
		cfg |= (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
		break;
	}
	/* remember the mode so the completion path knows whether to
	 * read the IV back out */
	pce_dev->mode = q_req->mode;

	switch (q_req->alg) {
	case CIPHER_ALG_DES:
		/* ECB needs no IV; other modes use a 2-word (8 byte) IV */
		if (q_req->mode !=  QCE_MODE_ECB) {
			_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
			writel_relaxed(enciv32[0], pce_dev->iobase +
					CRYPTO_CNTR0_IV0_REG);
			writel_relaxed(enciv32[1], pce_dev->iobase +
					CRYPTO_CNTR1_IV1_REG);
		}
		writel_relaxed(enckey32[0], pce_dev->iobase +
				CRYPTO_DES_KEY0_REG);
		writel_relaxed(enckey32[1], pce_dev->iobase +
				CRYPTO_DES_KEY1_REG);
		cfg |= ((CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
				(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
		break;

	case CIPHER_ALG_3DES:
		if (q_req->mode !=  QCE_MODE_ECB) {
			_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
			writel_relaxed(enciv32[0], pce_dev->iobase +
					CRYPTO_CNTR0_IV0_REG);
			writel_relaxed(enciv32[1], pce_dev->iobase +
					CRYPTO_CNTR1_IV1_REG);
		}
		/* 3DES takes a 6-word (24 byte) key */
		writel_relaxed(enckey32[0], pce_dev->iobase +
				CRYPTO_DES_KEY0_REG);
		writel_relaxed(enckey32[1], pce_dev->iobase +
				CRYPTO_DES_KEY1_REG);
		writel_relaxed(enckey32[2], pce_dev->iobase +
				CRYPTO_DES_KEY2_REG);
		writel_relaxed(enckey32[3], pce_dev->iobase +
				CRYPTO_DES_KEY3_REG);
		writel_relaxed(enckey32[4], pce_dev->iobase +
				CRYPTO_DES_KEY4_REG);
		writel_relaxed(enckey32[5], pce_dev->iobase +
				CRYPTO_DES_KEY5_REG);
		cfg |= ((CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
				(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
		break;

	case CIPHER_ALG_AES:
	default:
		/* ECB needs no IV; other modes use a 4-word (16 byte) IV */
		if (q_req->mode !=  QCE_MODE_ECB) {
			_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
			writel_relaxed(enciv32[0], pce_dev->iobase +
					CRYPTO_CNTR0_IV0_REG);
			writel_relaxed(enciv32[1], pce_dev->iobase +
					CRYPTO_CNTR1_IV1_REG);
			writel_relaxed(enciv32[2], pce_dev->iobase +
					CRYPTO_CNTR2_IV2_REG);
			writel_relaxed(enciv32[3], pce_dev->iobase +
					CRYPTO_CNTR3_IV3_REG);
		}
		/* set number of counter bits */
		writel_relaxed(0xffff, pce_dev->iobase + CRYPTO_CNTR_MASK_REG);

		if (q_req->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
			cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
					CRYPTO_ENCR_KEY_SZ);
			cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
		} else {
			switch (q_req->encklen) {
			case AES128_KEY_SIZE:
				cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
						CRYPTO_ENCR_KEY_SZ);
				break;
			case AES192_KEY_SIZE:
				cfg |= (CRYPTO_ENCR_KEY_SZ_AES192 <<
						CRYPTO_ENCR_KEY_SZ);
				break;
			case AES256_KEY_SIZE:
			default:
				cfg |= (CRYPTO_ENCR_KEY_SZ_AES256 <<
						CRYPTO_ENCR_KEY_SZ);

				/* check for null key. If null, use hw key*/
				for (i = 0; i < enck_size_in_word; i++) {
					if (enckey32[i] != 0)
						break;
				}
				if (i == enck_size_in_word)
					cfg |= 1 << CRYPTO_USE_HW_KEY;
				break;
			} /* end of switch (q_req->encklen) */

			cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
			/* compare against the cached key to decide whether
			 * the key registers must be rewritten */
			if (pce_dev->aes_key_size != q_req->encklen)
				aes_key_chg = 1;
			else {
				for (i = 0; i < enck_size_in_word; i++) {
					if (enckey32[i] != pce_dev->aeskey[i])
						break;
				}
				aes_key_chg = (i == enck_size_in_word) ? 0 : 1;
			}

			if (aes_key_chg) {
				if (pce_dev->fastaes) {
					/* hw expands the key itself */
					for (i = 0; i < enck_size_in_word;
							i++) {
						writel_relaxed(enckey32[i],
							pce_dev->iobase +
							CRYPTO_AES_RNDKEY0 +
							(i * sizeof(uint32_t)));
					}
				} else {
					/* size in bit */
					_aes_expand_key_schedule(
						q_req->encklen * 8,
						enckey32, aes_round_key);

					for (i = 0; i < CRYPTO_AES_RNDKEYS;
							i++) {
						writel_relaxed(aes_round_key[i],
							pce_dev->iobase +
							CRYPTO_AES_RNDKEY0 +
							(i * sizeof(uint32_t)));
					}
				}

				/* refresh the cached key */
				pce_dev->aes_key_size = q_req->encklen;
				for (i = 0; i < enck_size_in_word; i++)
					pce_dev->aeskey[i] = enckey32[i];
			} /*if (aes_key_chg) { */
		} /* else of if (q_req->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
		break;
	} /* end of switch (q_req->alg) */

	if (q_req->dir == QCE_ENCRYPT)
		cfg |= (1 << CRYPTO_AUTH_POS);
	cfg |= ((q_req->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;

	/* write encr seg cfg */
	writel_relaxed((q_req->cryptlen << CRYPTO_ENCR_SEG_SIZE) |
			(coffset & 0xffff),		/* cipher offset */
			pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);

	/* write seg cfg and size */
	writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);
	writel_relaxed(totallen, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	/* issue go to crypto */
	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
	/* Ensure previous instructions (setting the GO register)
	 * was completed before issuing a DMA transfer request
	 */
	mb();
	return 0;
};
901
/*
 * Completion path for an AEAD request: unmap all DMA buffers, check the
 * engine's error status, read back the output IV (non-ECB modes), and
 * invoke the client callback with the digest and IV.
 * Always returns 0; errors are reported through the callback status.
 */
static int _aead_complete(struct qce_device *pce_dev)
{
	struct aead_request *areq;
	struct crypto_aead *aead;
	uint32_t ivsize;
	uint32_t iv_out[4];
	unsigned char iv[4 * sizeof(uint32_t)];
	uint32_t status;

	areq = (struct aead_request *) pce_dev->areq;
	aead = crypto_aead_reqtfm(areq);
	ivsize = crypto_aead_ivsize(aead);

	/* unmap dst first when it was mapped separately from src */
	if (areq->src != areq->dst) {
		dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
				DMA_FROM_DEVICE);
	}
	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
							DMA_TO_DEVICE);
	dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
			ivsize, DMA_TO_DEVICE);
	dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
			DMA_TO_DEVICE);

	/* check ce error status */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (status & (1 << CRYPTO_SW_ERR)) {
		pce_dev->err++;
		dev_err(pce_dev->pdev,
			"Qualcomm Crypto Error at 0x%x, status%x\n",
			pce_dev->phy_iobase, status);
		/* re-initialize the engine after a hw error */
		_init_ce_engine(pce_dev);
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, pce_dev->dig_result, NULL, -ENXIO);
		return 0;
	};

	/* get iv out; ECB has no output IV */
	if (pce_dev->mode == QCE_MODE_ECB) {
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, pce_dev->dig_result, NULL,
				pce_dev->chan_ce_in_status |
				pce_dev->chan_ce_out_status);
	} else {

		iv_out[0] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR0_IV0_REG);
		iv_out[1] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR1_IV1_REG);
		iv_out[2] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR2_IV2_REG);
		iv_out[3] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR3_IV3_REG);

		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, pce_dev->dig_result, iv,
				pce_dev->chan_ce_in_status |
				pce_dev->chan_ce_out_status);
	};
	return 0;
};
965
/*
 * Completion path for a hash request: unmap the source scatterlist,
 * check the engine's error status, read back the auth byte counters,
 * and invoke the client callback with the digest and counters.
 */
static void _sha_complete(struct qce_device *pce_dev)
{

	struct ahash_request *areq;
	uint32_t auth_data[2];
	uint32_t status;

	areq = (struct ahash_request *) pce_dev->areq;
	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
				DMA_TO_DEVICE);

	/* check ce error status */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (status & (1 << CRYPTO_SW_ERR)) {
		pce_dev->err++;
		dev_err(pce_dev->pdev,
			"Qualcomm Crypto Error at 0x%x, status%x\n",
			pce_dev->phy_iobase, status);
		/* re-initialize the engine after a hw error */
		_init_ce_engine(pce_dev);
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, pce_dev->dig_result, NULL, -ENXIO);
		return;
	};

	/* running byte counters, returned to the caller for multi-pass
	 * hashing */
	auth_data[0] = readl_relaxed(pce_dev->iobase +
			CRYPTO_AUTH_BYTECNT0_REG);
	auth_data[1] = readl_relaxed(pce_dev->iobase +
			CRYPTO_AUTH_BYTECNT1_REG);
	/* Ensure previous instruction (retriving byte count information)
	 * was completed before disabling the clk.
	 */
	mb();
	clk_disable(pce_dev->ce_clk);
	pce_dev->qce_cb(areq, pce_dev->dig_result, (unsigned char *)auth_data,
			pce_dev->chan_ce_in_status);
};
1002
/*
 * Completion path for an ablkcipher request: unmap the DMA buffers,
 * check the engine's error status, read back the output IV (non-ECB
 * modes), and invoke the client callback.
 * Always returns 0; errors are reported through the callback status.
 */
static int _ablk_cipher_complete(struct qce_device *pce_dev)
{
	struct ablkcipher_request *areq;
	uint32_t iv_out[4];
	unsigned char iv[4 * sizeof(uint32_t)];
	uint32_t status;

	areq = (struct ablkcipher_request *) pce_dev->areq;

	/* unmap dst first when it was mapped separately from src */
	if (areq->src != areq->dst) {
		dma_unmap_sg(pce_dev->pdev, areq->dst,
			pce_dev->dst_nents, DMA_FROM_DEVICE);
	}
	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
		(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
						DMA_TO_DEVICE);

	/* check ce error status */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (status & (1 << CRYPTO_SW_ERR)) {
		pce_dev->err++;
		dev_err(pce_dev->pdev,
			"Qualcomm Crypto Error at 0x%x, status%x\n",
			pce_dev->phy_iobase, status);
		/* re-initialize the engine after a hw error */
		_init_ce_engine(pce_dev);
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, NULL, NULL, -ENXIO);
		return 0;
	};

	/* get iv out; ECB has no output IV */
	if (pce_dev->mode == QCE_MODE_ECB) {
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status |
					pce_dev->chan_ce_out_status);
	} else {
		iv_out[0] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR0_IV0_REG);
		iv_out[1] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR1_IV1_REG);
		iv_out[2] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR2_IV2_REG);
		iv_out[3] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR3_IV3_REG);

		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status |
					pce_dev->chan_ce_out_status);
	}

	return 0;
};
1056
/*
 * Completion path for an ablkcipher request whose buffers live in pmem:
 * identical to _ablk_cipher_complete() except that no DMA unmapping is
 * needed (the pmem addresses were used directly).
 * Always returns 0; errors are reported through the callback status.
 */
static int _ablk_cipher_use_pmem_complete(struct qce_device *pce_dev)
{
	struct ablkcipher_request *areq;
	uint32_t iv_out[4];
	unsigned char iv[4 * sizeof(uint32_t)];
	uint32_t status;

	areq = (struct ablkcipher_request *) pce_dev->areq;

	/* check ce error status */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (status & (1 << CRYPTO_SW_ERR)) {
		pce_dev->err++;
		dev_err(pce_dev->pdev,
			"Qualcomm Crypto Error at 0x%x, status%x\n",
			pce_dev->phy_iobase, status);
		/* re-initialize the engine after a hw error */
		_init_ce_engine(pce_dev);
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, NULL, NULL, -ENXIO);
		return 0;
	};

	/* get iv out; ECB has no output IV */
	if (pce_dev->mode == QCE_MODE_ECB) {
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status |
					pce_dev->chan_ce_out_status);
	} else {
		iv_out[0] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR0_IV0_REG);
		iv_out[1] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR1_IV1_REG);
		iv_out[2] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR2_IV2_REG);
		iv_out[3] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR3_IV3_REG);

		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status |
					pce_dev->chan_ce_out_status);
	}

	return 0;
};
1102
1103
1104
/*
 * Append up to @nbytes from scatterlist @sg to the CE input descriptor
 * chain, merging physically contiguous chunks into a single descriptor.
 * Returns 0 on success, -ENOMEM when more than QCE_MAX_NUM_DESC
 * descriptors would be needed.
 */
static int _chain_sg_buffer_in(struct qce_device *pce_dev,
		struct scatterlist *sg, unsigned int nbytes)
{
	unsigned int len;
	unsigned int dlen;
	struct dmov_desc *pdesc;

	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
	/*
	 * Two consecutive chunks may be handled by the old
	 * buffer descriptor.
	 */
	while (nbytes > 0) {
		len = min(nbytes, sg_dma_len(sg));
		dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
		nbytes -= len;
		if (dlen == 0) {
			/* current descriptor is still empty */
			pdesc->addr = sg_dma_address(sg);
			pdesc->len = len;
		} else if (sg_dma_address(sg) == (pdesc->addr + dlen))
			/* contiguous with the previous chunk - extend */
			pdesc->len = dlen + len;
		else {
			/* start a new descriptor */
			pce_dev->ce_in_src_desc_index++;
			if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
				return -ENOMEM;
			pdesc++;
			pdesc->len = len;
			pdesc->addr = sg_dma_address(sg);
		}
		if (nbytes > 0)
			sg = sg_next(sg);
	}
	return 0;
}
1139
1140static int _chain_pm_buffer_in(struct qce_device *pce_dev,
1141 unsigned int pmem, unsigned int nbytes)
1142{
1143 unsigned int dlen;
1144 struct dmov_desc *pdesc;
1145
1146 pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
1147 dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
1148 if (dlen == 0) {
1149 pdesc->addr = pmem;
1150 pdesc->len = nbytes;
1151 } else if (pmem == (pdesc->addr + dlen)) {
1152 pdesc->len = dlen + nbytes;
1153 } else {
1154 pce_dev->ce_in_src_desc_index++;
1155 if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
1156 return -ENOMEM;
1157 pdesc++;
1158 pdesc->len = nbytes;
1159 pdesc->addr = pmem;
1160 }
1161 return 0;
1162}
1163
1164static void _chain_buffer_in_init(struct qce_device *pce_dev)
1165{
1166 struct dmov_desc *pdesc;
1167
1168 pce_dev->ce_in_src_desc_index = 0;
1169 pdesc = pce_dev->ce_in_src_desc;
1170 pdesc->len = 0;
1171}
1172
/*
 * Finalize the ce_in channel: flag the last source descriptor and the
 * single destination descriptor with ADM_DESC_LAST, then terminate the
 * command list after ncmd commands (1 = scatter-gather command only,
 * 2 = also the trailing single command that reads back the digest).
 * total is the byte count written into the destination descriptor.
 */
static void _ce_in_final(struct qce_device *pce_dev, int ncmd, unsigned total)
{
	struct dmov_desc *pdesc;
	dmov_sg *pcmd;

	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
	pdesc->len |= ADM_DESC_LAST;
	pdesc = pce_dev->ce_in_dst_desc;
	pdesc->len = ADM_DESC_LAST | total;

	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in;
	if (ncmd == 1)
		pcmd->cmd |= CMD_LC;
	else {
		dmov_s *pscmd;

		/* list continues past the SG command; LC moves to cmd 2 */
		pcmd->cmd &= ~CMD_LC;
		pcmd++;
		pscmd = (dmov_s *)pcmd;
		pscmd->cmd |= CMD_LC;
	}

#ifdef QCE_DEBUG
	dev_info(pce_dev->pdev, "_ce_in_final %d\n",
				pce_dev->ce_in_src_desc_index);
#endif
}
1200
1201#ifdef QCE_DEBUG
1202static void _ce_in_dump(struct qce_device *pce_dev)
1203{
1204 int i;
1205 struct dmov_desc *pdesc;
1206
1207 dev_info(pce_dev->pdev, "_ce_in_dump\n");
1208 for (i = 0; i <= pce_dev->ce_in_src_desc_index; i++) {
1209 pdesc = pce_dev->ce_in_src_desc + i;
1210 dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
1211 pdesc->len);
1212 }
1213 pdesc = pce_dev->ce_in_dst_desc;
1214 dev_info(pce_dev->pdev, "dst - %x , %x\n", pdesc->addr,
1215 pdesc->len);
1216};
1217
1218static void _ce_out_dump(struct qce_device *pce_dev)
1219{
1220 int i;
1221 struct dmov_desc *pdesc;
1222
1223 dev_info(pce_dev->pdev, "_ce_out_dump\n");
1224 for (i = 0; i <= pce_dev->ce_out_dst_desc_index; i++) {
1225 pdesc = pce_dev->ce_out_dst_desc + i;
1226 dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
1227 pdesc->len);
1228 }
1229 pdesc = pce_dev->ce_out_src_desc;
1230 dev_info(pce_dev->pdev, "src - %x , %x\n", pdesc->addr,
1231 pdesc->len);
1232};
1233#endif
1234
1235static int _chain_sg_buffer_out(struct qce_device *pce_dev,
1236 struct scatterlist *sg, unsigned int nbytes)
1237{
1238 unsigned int len;
1239 unsigned int dlen;
1240 struct dmov_desc *pdesc;
1241
1242 pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
1243 /*
1244 * Two consective chunks may be handled by the old
1245 * buffer descriptor.
1246 */
1247 while (nbytes > 0) {
1248 len = min(nbytes, sg_dma_len(sg));
1249 dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
1250 nbytes -= len;
1251 if (dlen == 0) {
1252 pdesc->addr = sg_dma_address(sg);
1253 pdesc->len = len;
1254 } else if (sg_dma_address(sg) == (pdesc->addr + dlen)) {
1255 pdesc->len = dlen + len;
1256 } else {
1257 pce_dev->ce_out_dst_desc_index++;
1258 if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
1259 return -EIO;
1260 pdesc++;
1261 pdesc->len = len;
1262 pdesc->addr = sg_dma_address(sg);
1263 }
1264 if (nbytes > 0)
1265 sg = sg_next(sg);
1266 }
1267 return 0;
1268}
1269
1270static int _chain_pm_buffer_out(struct qce_device *pce_dev,
1271 unsigned int pmem, unsigned int nbytes)
1272{
1273 unsigned int dlen;
1274 struct dmov_desc *pdesc;
1275
1276 pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
1277 dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
1278
1279 if (dlen == 0) {
1280 pdesc->addr = pmem;
1281 pdesc->len = nbytes;
1282 } else if (pmem == (pdesc->addr + dlen)) {
1283 pdesc->len = dlen + nbytes;
1284 } else {
1285 pce_dev->ce_out_dst_desc_index++;
1286 if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
1287 return -EIO;
1288 pdesc++;
1289 pdesc->len = nbytes;
1290 pdesc->addr = pmem;
1291 }
1292 return 0;
1293};
1294
1295static void _chain_buffer_out_init(struct qce_device *pce_dev)
1296{
1297 struct dmov_desc *pdesc;
1298
1299 pce_dev->ce_out_dst_desc_index = 0;
1300 pdesc = pce_dev->ce_out_dst_desc;
1301 pdesc->len = 0;
1302};
1303
/*
 * Finalize the ce_out channel: flag the last destination descriptor and
 * the single source descriptor with ADM_DESC_LAST, then terminate the
 * command list after ncmd commands (1 = scatter-gather command only,
 * 2 = also the trailing single command that reads back the digest).
 * total is the byte count written into the source descriptor.
 */
static void _ce_out_final(struct qce_device *pce_dev, int ncmd, unsigned total)
{
	struct dmov_desc *pdesc;
	dmov_sg *pcmd;

	pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
	pdesc->len |= ADM_DESC_LAST;
	pdesc = pce_dev->ce_out_src_desc;
	pdesc->len = ADM_DESC_LAST | total;
	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out;
	if (ncmd == 1)
		pcmd->cmd |= CMD_LC;
	else {
		dmov_s *pscmd;

		/* list continues past the SG command; LC moves to cmd 2 */
		pcmd->cmd &= ~CMD_LC;
		pcmd++;
		pscmd = (dmov_s *)pcmd;
		pscmd->cmd |= CMD_LC;
	}
#ifdef QCE_DEBUG
	dev_info(pce_dev->pdev, "_ce_out_final %d\n",
			pce_dev->ce_out_dst_desc_index);
#endif

};
1330
1331static void _aead_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
1332 unsigned int result, struct msm_dmov_errdata *err)
1333{
1334 struct qce_device *pce_dev;
1335
1336 pce_dev = (struct qce_device *) cmd_ptr->user;
1337 if (result != ADM_STATUS_OK) {
1338 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1339 result);
1340 pce_dev->chan_ce_in_status = -1;
1341 } else
1342 pce_dev->chan_ce_in_status = 0;
1343
1344 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
1345 if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
1346 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1347 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1348
1349 /* done */
1350 _aead_complete(pce_dev);
1351 }
1352};
1353
1354static void _aead_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
1355 unsigned int result, struct msm_dmov_errdata *err)
1356{
1357 struct qce_device *pce_dev;
1358
1359 pce_dev = (struct qce_device *) cmd_ptr->user;
1360 if (result != ADM_STATUS_OK) {
1361 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1362 result);
1363 pce_dev->chan_ce_out_status = -1;
1364 } else {
1365 pce_dev->chan_ce_out_status = 0;
1366 };
1367
1368 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
1369 if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
1370 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1371 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1372
1373 /* done */
1374 _aead_complete(pce_dev);
1375 }
1376
1377};
1378
1379static void _sha_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
1380 unsigned int result, struct msm_dmov_errdata *err)
1381{
1382 struct qce_device *pce_dev;
1383
1384 pce_dev = (struct qce_device *) cmd_ptr->user;
1385 if (result != ADM_STATUS_OK) {
1386 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1387 result);
1388 pce_dev->chan_ce_in_status = -1;
1389 } else
1390 pce_dev->chan_ce_in_status = 0;
1391 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1392 _sha_complete(pce_dev);
1393};
1394
1395static void _ablk_cipher_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
1396 unsigned int result, struct msm_dmov_errdata *err)
1397{
1398 struct qce_device *pce_dev;
1399
1400 pce_dev = (struct qce_device *) cmd_ptr->user;
1401 if (result != ADM_STATUS_OK) {
1402 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1403 result);
1404 pce_dev->chan_ce_in_status = -1;
1405 } else
1406 pce_dev->chan_ce_in_status = 0;
1407
1408 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
1409 if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
1410 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1411 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1412
1413 /* done */
1414 _ablk_cipher_complete(pce_dev);
1415 }
1416};
1417
1418static void _ablk_cipher_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
1419 unsigned int result, struct msm_dmov_errdata *err)
1420{
1421 struct qce_device *pce_dev;
1422
1423 pce_dev = (struct qce_device *) cmd_ptr->user;
1424 if (result != ADM_STATUS_OK) {
1425 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1426 result);
1427 pce_dev->chan_ce_out_status = -1;
1428 } else {
1429 pce_dev->chan_ce_out_status = 0;
1430 };
1431
1432 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
1433 if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
1434 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1435 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1436
1437 /* done */
1438 _ablk_cipher_complete(pce_dev);
1439 }
1440};
1441
1442
1443static void _ablk_cipher_ce_in_call_back_pmem(struct msm_dmov_cmd *cmd_ptr,
1444 unsigned int result, struct msm_dmov_errdata *err)
1445{
1446 struct qce_device *pce_dev;
1447
1448 pce_dev = (struct qce_device *) cmd_ptr->user;
1449 if (result != ADM_STATUS_OK) {
1450 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1451 result);
1452 pce_dev->chan_ce_in_status = -1;
1453 } else
1454 pce_dev->chan_ce_in_status = 0;
1455
1456 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
1457 if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
1458 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1459 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1460
1461 /* done */
1462 _ablk_cipher_use_pmem_complete(pce_dev);
1463 }
1464};
1465
1466static void _ablk_cipher_ce_out_call_back_pmem(struct msm_dmov_cmd *cmd_ptr,
1467 unsigned int result, struct msm_dmov_errdata *err)
1468{
1469 struct qce_device *pce_dev;
1470
1471 pce_dev = (struct qce_device *) cmd_ptr->user;
1472 if (result != ADM_STATUS_OK) {
1473 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1474 result);
1475 pce_dev->chan_ce_out_status = -1;
1476 } else {
1477 pce_dev->chan_ce_out_status = 0;
1478 };
1479
1480 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
1481 if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
1482 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1483 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1484
1485 /* done */
1486 _ablk_cipher_use_pmem_complete(pce_dev);
1487 }
1488};
1489
/*
 * Carve the pre-allocated 4K coherent DMA region (coh_vmem/coh_pmem)
 * into the descriptor tables, command lists, command pointer lists,
 * digest buffer, pad area and throw-away area used by the ce_in and
 * ce_out ADM channels, then pre-build the constant parts of both
 * channels' command templates.  Per-request code later patches only
 * addresses and lengths.  Called once at probe time; always returns 0.
 * NOTE(review): the carving order below defines each area's offset -
 * do not reorder the sections.
 */
static int _setup_cmd_template(struct qce_device *pce_dev)
{
	dmov_sg *pcmd;
	dmov_s *pscmd;
	struct dmov_desc *pdesc;
	unsigned char *vaddr;

	/* Divide up the 4K coherent memory */
	/* 1. ce_in channel 1st command src descriptors, 128 entries */
	vaddr = pce_dev->coh_vmem;
	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
	pce_dev->ce_in_src_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_in_src_desc = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);

	/* 2. ce_in channel 1st command dst descriptor, 1 entry */
	pce_dev->ce_in_dst_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_in_dst_desc = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(struct dmov_desc) ;

	/*
	 * 3. ce_in channel command list of one scatter gather command
	 *    and one simple command.
	 */
	pce_dev->cmd_list_ce_in = vaddr;
	pce_dev->phy_cmd_list_ce_in = pce_dev->coh_pmem
			 + (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(dmov_s) + sizeof(dmov_sg);

	/* 4. authentication result. */
	pce_dev->dig_result = vaddr;
	pce_dev->phy_dig_result = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + SHA256_DIGESTSIZE;

	/*
	 * 5. ce_out channel command list of one scatter gather command
	 *    and one simple command.
	 */
	pce_dev->cmd_list_ce_out = vaddr;
	pce_dev->phy_cmd_list_ce_out = pce_dev->coh_pmem
			 + (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(dmov_s) + sizeof(dmov_sg);

	/* 6. ce_out channel command src descriptors, 1 entry */
	pce_dev->ce_out_src_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_out_src_desc = pce_dev->coh_pmem
			 + (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(struct dmov_desc) ;

	/* 7. ce_out channel command dst descriptors, 128 entries. */
	pce_dev->ce_out_dst_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_out_dst_desc = pce_dev->coh_pmem
			 + (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);

	/* 8. pad area (source of zero padding to an ADM block boundary). */
	pce_dev->ce_pad = vaddr;
	pce_dev->phy_ce_pad = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + ADM_CE_BLOCK_SIZE;

	/* 9. ce_in channel command pointer list. */
	pce_dev->cmd_pointer_list_ce_in = (unsigned int *) vaddr;
	pce_dev->phy_cmd_pointer_list_ce_in = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(unsigned char *);
	vaddr = (unsigned char *) ALIGN(((unsigned int) vaddr), 8);

	/* 10. ce_out channel command pointer list. */
	pce_dev->cmd_pointer_list_ce_out = (unsigned int *) vaddr;
	pce_dev->phy_cmd_pointer_list_ce_out = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(unsigned char *);

	/* 11. throw away area to store by-pass data from ce_out. */
	pce_dev->ce_out_ignore = (unsigned char *) vaddr;
	pce_dev->phy_ce_out_ignore  = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	pce_dev->ce_out_ignore_size = PAGE_SIZE - (vaddr -
			pce_dev->coh_vmem);  /* at least 1.5 K of space */
	/*
	 * The first command of command list ce_in is for the input of
	 * concurrent operation of encrypt/decrypt or for the input
	 * of authentication.
	 */
	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in;
	/* swap byte and half word , dst crci ,  scatter gather */
	pcmd->cmd = CMD_DST_SWAP_BYTES | CMD_DST_SWAP_SHORTS |
			CMD_DST_CRCI(pce_dev->crci_in) | CMD_MODE_SG;
	pdesc = pce_dev->ce_in_src_desc;
	pdesc->addr = 0;	/* to be filled in each operation */
	pdesc->len = 0;		/* to be filled in each operation */
	pcmd->src_dscr = (unsigned) pce_dev->phy_ce_in_src_desc;
	pdesc = pce_dev->ce_in_dst_desc;
	pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase);
	pdesc->len = 0 | ADM_DESC_LAST;	/* to be filled in each operation */
	pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_in_dst_desc;
	pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) |
						DST_INDEX_SG_CMD(0);
	pcmd++;
	/*
	 * The second command is for the digested data of
	 * hashing operation only.  For others, this command is not used.
	 */
	pscmd = (dmov_s *) pcmd;
	/* last command, swap byte, half word, src crci, single */
	pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
			CMD_SRC_CRCI(pce_dev->crci_hash) | CMD_MODE_SINGLE;
	pscmd->src = (unsigned) (CRYPTO_AUTH_IV0_REG + pce_dev->phy_iobase);
	pscmd->len = SHA256_DIGESTSIZE;	/* to be filled. */
	pscmd->dst = (unsigned) pce_dev->phy_dig_result;
	/* setup command pointer list */
	*(pce_dev->cmd_pointer_list_ce_in) = (CMD_PTR_LP | DMOV_CMD_LIST |
			DMOV_CMD_ADDR((unsigned int)
					pce_dev->phy_cmd_list_ce_in));
	pce_dev->chan_ce_in_cmd->user = (void *) pce_dev;
	pce_dev->chan_ce_in_cmd->exec_func = NULL;
	pce_dev->chan_ce_in_cmd->cmdptr = DMOV_CMD_ADDR(
			(unsigned int) pce_dev->phy_cmd_pointer_list_ce_in);
	pce_dev->chan_ce_in_cmd->crci_mask = msm_dmov_build_crci_mask(2,
			pce_dev->crci_in, pce_dev->crci_hash);
	/*
	 * The first command in the command list ce_out.
	 * It is for encry/decryp output.
	 * If hashing only, ce_out is not used.
	 */
	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out;
	/* swap byte, half word, source crci, scatter gather */
	pcmd->cmd =   CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
			CMD_SRC_CRCI(pce_dev->crci_out) | CMD_MODE_SG;
	pdesc = pce_dev->ce_out_src_desc;
	pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase);
	pdesc->len = 0;		/* to be filled in each operation */
	pcmd->src_dscr = (unsigned) pce_dev->phy_ce_out_src_desc;
	pdesc = pce_dev->ce_out_dst_desc;
	pdesc->addr = 0;	/* to be filled in each operation */
	pdesc->len = 0;		/* to be filled in each operation */
	pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_out_dst_desc;
	pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) |
						DST_INDEX_SG_CMD(0);
	pcmd++;
	/*
	 * The second command is for digested data of esp operation.
	 * For ciphering, this command is not used.
	 */
	pscmd = (dmov_s *) pcmd;
	/* last command, swap byte, half word, src crci, single */
	pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
			CMD_SRC_CRCI(pce_dev->crci_hash) | CMD_MODE_SINGLE;
	pscmd->src = (CRYPTO_AUTH_IV0_REG + pce_dev->phy_iobase);
	pscmd->len = SHA1_DIGESTSIZE;	/* we only support hmac(sha1) */
	pscmd->dst = (unsigned) pce_dev->phy_dig_result;
	/* setup command pointer list */
	*(pce_dev->cmd_pointer_list_ce_out) = (CMD_PTR_LP | DMOV_CMD_LIST |
			DMOV_CMD_ADDR((unsigned int)pce_dev->
						phy_cmd_list_ce_out));

	pce_dev->chan_ce_out_cmd->user = pce_dev;
	pce_dev->chan_ce_out_cmd->exec_func = NULL;
	pce_dev->chan_ce_out_cmd->cmdptr = DMOV_CMD_ADDR(
			(unsigned int) pce_dev->phy_cmd_pointer_list_ce_out);
	pce_dev->chan_ce_out_cmd->crci_mask = msm_dmov_build_crci_mask(2,
			pce_dev->crci_out, pce_dev->crci_hash);


	return 0;
};
1660
1661static int _qce_start_dma(struct qce_device *pce_dev, bool ce_in, bool ce_out)
1662{
1663
1664 if (ce_in)
1665 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IN_PROG;
1666 else
1667 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
1668
1669 if (ce_out)
1670 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IN_PROG;
1671 else
1672 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
1673
1674 if (ce_in)
1675 msm_dmov_enqueue_cmd(pce_dev->chan_ce_in,
1676 pce_dev->chan_ce_in_cmd);
1677 if (ce_out)
1678 msm_dmov_enqueue_cmd(pce_dev->chan_ce_out,
1679 pce_dev->chan_ce_out_cmd);
1680
1681 return 0;
1682};
1683
/*
 * Completion of an OTA f9 (integrity) operation: unmap the source
 * buffer, check the engine for a SW error, read the computed MAC-I
 * from the auth IV register and hand it to the client callback.
 * NOTE(review): the 32-bit MAC value itself is cast to void * and
 * passed as the callback's pointer argument - the client is expected
 * to treat it as a value, not dereference it.
 */
static void _f9_complete(struct qce_device *pce_dev)
{
	uint32_t mac_i;
	uint32_t status;

	dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
				pce_dev->ota_size, DMA_TO_DEVICE);

	/* check ce error status */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (status & (1 << CRYPTO_SW_ERR)) {
		pce_dev->err++;
		dev_err(pce_dev->pdev,
			"Qualcomm Crypto Error at 0x%x, status%x\n",
			pce_dev->phy_iobase, status);
		/* reset the engine so later requests can proceed */
		_init_ce_engine(pce_dev);
		pce_dev->qce_cb(pce_dev->areq, NULL, NULL, -ENXIO);
		return;
	};

	mac_i = readl_relaxed(pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
	pce_dev->qce_cb(pce_dev->areq, (void *) mac_i, NULL,
				pce_dev->chan_ce_in_status);
};
1708
/*
 * Completion of an OTA f8 (ciphering) operation: unmap the source and
 * (if distinct) destination buffers, check the engine for a SW error,
 * and invoke the client callback with the combined channel status.
 * phy_ota_dst == 0 means the operation was in-place, in which case the
 * source mapping was bidirectional.
 */
static void _f8_complete(struct qce_device *pce_dev)
{
	uint32_t status;

	if (pce_dev->phy_ota_dst != 0)
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst,
				pce_dev->ota_size, DMA_FROM_DEVICE);
	if (pce_dev->phy_ota_src != 0)
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
				pce_dev->ota_size, (pce_dev->phy_ota_dst) ?
				DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	/* check ce error status */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (status & (1 << CRYPTO_SW_ERR)) {
		pce_dev->err++;
		dev_err(pce_dev->pdev,
			"Qualcomm Crypto Error at 0x%x, status%x\n",
			pce_dev->phy_iobase, status);
		/* reset the engine so later requests can proceed */
		_init_ce_engine(pce_dev);
		pce_dev->qce_cb(pce_dev->areq, NULL, NULL, -ENXIO);
		return;
	};

	pce_dev->qce_cb(pce_dev->areq, NULL, NULL,
				pce_dev->chan_ce_in_status |
				pce_dev->chan_ce_out_status);
};
1737
1738
1739static void _f9_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
1740 unsigned int result, struct msm_dmov_errdata *err)
1741{
1742 struct qce_device *pce_dev;
1743
1744 pce_dev = (struct qce_device *) cmd_ptr->user;
1745 if (result != ADM_STATUS_OK) {
1746 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1747 result);
1748 pce_dev->chan_ce_in_status = -1;
1749 } else
1750 pce_dev->chan_ce_in_status = 0;
1751 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1752 _f9_complete(pce_dev);
1753};
1754
1755static void _f8_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
1756 unsigned int result, struct msm_dmov_errdata *err)
1757{
1758 struct qce_device *pce_dev;
1759
1760 pce_dev = (struct qce_device *) cmd_ptr->user;
1761 if (result != ADM_STATUS_OK) {
1762 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1763 result);
1764 pce_dev->chan_ce_in_status = -1;
1765 } else
1766 pce_dev->chan_ce_in_status = 0;
1767
1768 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
1769 if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
1770 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1771 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1772
1773 /* done */
1774 _f8_complete(pce_dev);
1775 }
1776};
1777
1778static void _f8_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
1779 unsigned int result, struct msm_dmov_errdata *err)
1780{
1781 struct qce_device *pce_dev;
1782
1783 pce_dev = (struct qce_device *) cmd_ptr->user;
1784 if (result != ADM_STATUS_OK) {
1785 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1786 result);
1787 pce_dev->chan_ce_out_status = -1;
1788 } else {
1789 pce_dev->chan_ce_out_status = 0;
1790 };
1791
1792 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
1793 if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
1794 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1795 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1796
1797 /* done */
1798 _f8_complete(pce_dev);
1799 }
1800};
1801
/*
 * Program the crypto engine registers for an OTA f9 (integrity)
 * operation: integrity key, LAST bits word, FRESH and COUNT-I values,
 * segment configuration (algorithm UIA1/UIA2, direction) and segment
 * size, then issue GO.  Always returns 0.
 * NOTE(review): register writes are relaxed; the final mb() orders
 * them before the subsequent DMA enqueue.
 */
static int _ce_f9_setup(struct qce_device *pce_dev, struct qce_f9_req * req)
{
	uint32_t cfg;
	uint32_t ikey[OTA_KEY_SIZE/sizeof(uint32_t)];

	/* integrity key goes into the auth IV registers */
	_byte_stream_to_net_words(ikey, &req->ikey[0], OTA_KEY_SIZE);
	writel_relaxed(ikey[0], pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
	writel_relaxed(ikey[1], pce_dev->iobase + CRYPTO_AUTH_IV1_REG);
	writel_relaxed(ikey[2], pce_dev->iobase + CRYPTO_AUTH_IV2_REG);
	writel_relaxed(ikey[3], pce_dev->iobase + CRYPTO_AUTH_IV3_REG);
	writel_relaxed(req->last_bits, pce_dev->iobase + CRYPTO_AUTH_IV4_REG);

	writel_relaxed(req->fresh, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG);
	writel_relaxed(req->count_i, pce_dev->iobase +
						CRYPTO_AUTH_BYTECNT1_REG);

	/* write auth_seg_cfg */
	writel_relaxed((uint32_t)req->msize << CRYPTO_AUTH_SEG_SIZE,
			pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);

	/* write seg_cfg */
	cfg = (CRYPTO_AUTH_ALG_F9 << CRYPTO_AUTH_ALG) | (1 << CRYPTO_FIRST) |
			(1 << CRYPTO_LAST);

	if (req->algorithm == QCE_OTA_ALGO_KASUMI)
		cfg |= (CRYPTO_AUTH_SIZE_UIA1 << CRYPTO_AUTH_SIZE);
	else
		cfg |= (CRYPTO_AUTH_SIZE_UIA2 << CRYPTO_AUTH_SIZE) ;

	if (req->direction == QCE_OTA_DIR_DOWNLINK)
		cfg |= 1 << CRYPTO_F9_DIRECTION;

	writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);

	/* write seg_size */
	writel_relaxed(req->msize, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	/* issue go to crypto */
	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);

	/*
	 * barrier to ensure previous instructions
	 * (including GO) to CE finish before issue DMA transfer
	 * request.
	 */
	mb();
	return 0;
};
1850
/*
 * Program the crypto engine registers for an OTA f8 (ciphering)
 * operation: segment configuration (algorithm UEA1/UEA2, key-stream
 * mode, direction), segment and cipher-segment geometry, cipher key,
 * COUNT-C, and the nPkts/bearer word, then issue GO.
 * Returns 0, or -EINVAL for an invalid key-stream-mode combination or
 * an out-of-range bearer.
 * NOTE(review): register writes are relaxed; the final mb() orders
 * them before the subsequent DMA enqueue.
 */
static int _ce_f8_setup(struct qce_device *pce_dev, struct qce_f8_req *req,
		bool key_stream_mode, uint16_t npkts, uint16_t cipher_offset,
		uint16_t cipher_size)
{
	uint32_t cfg;
	uint32_t ckey[OTA_KEY_SIZE/sizeof(uint32_t)];

	/* key-stream mode requires 16-byte-multiple data and one packet */
	if ((key_stream_mode && (req->data_len & 0xf || npkts > 1)) ||
				(req->bearer >= QCE_OTA_MAX_BEARER))
		return -EINVAL;

	/* write seg_cfg */
	cfg = (CRYPTO_ENCR_ALG_F8 << CRYPTO_ENCR_ALG) | (1 << CRYPTO_FIRST) |
			(1 << CRYPTO_LAST);
	if (req->algorithm == QCE_OTA_ALGO_KASUMI)
		cfg |= (CRYPTO_ENCR_KEY_SZ_UEA1 << CRYPTO_ENCR_KEY_SZ);
	else
		cfg |= (CRYPTO_ENCR_KEY_SZ_UEA2 << CRYPTO_ENCR_KEY_SZ) ;
	if (key_stream_mode)
		cfg |= 1 << CRYPTO_F8_KEYSTREAM_ENABLE;
	if (req->direction == QCE_OTA_DIR_DOWNLINK)
		cfg |= 1 << CRYPTO_F8_DIRECTION;
	writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);

	/* write seg_size */
	writel_relaxed(req->data_len, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	/* write 0 to auth_size, auth_offset */
	writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);

	/* write encr_seg_cfg seg_size, seg_offset */
	writel_relaxed((((uint32_t) cipher_size) << CRYPTO_ENCR_SEG_SIZE) |
			(cipher_offset & 0xffff),
			pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);

	/* write keys - cipher key shares the DES key registers */
	_byte_stream_to_net_words(ckey, &req->ckey[0], OTA_KEY_SIZE);
	writel_relaxed(ckey[0], pce_dev->iobase + CRYPTO_DES_KEY0_REG);
	writel_relaxed(ckey[1], pce_dev->iobase + CRYPTO_DES_KEY1_REG);
	writel_relaxed(ckey[2], pce_dev->iobase + CRYPTO_DES_KEY2_REG);
	writel_relaxed(ckey[3], pce_dev->iobase + CRYPTO_DES_KEY3_REG);

	/* write cntr0_iv0 for countC */
	writel_relaxed(req->count_c, pce_dev->iobase + CRYPTO_CNTR0_IV0_REG);

	/* write cntr1_iv1 for nPkts, and bearer */
	if (npkts == 1)
		npkts = 0;
	writel_relaxed(req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
				npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT,
			pce_dev->iobase + CRYPTO_CNTR1_IV1_REG);

	/* issue go to crypto */
	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);

	/*
	 * barrier to ensure previous instructions
	 * (including GO) to CE finish before issue DMA transfer
	 * request.
	 */
	mb();
	return 0;
};
1914
/*
 * Submit an AEAD (cipher + authentication) request to the crypto
 * engine.  Chains the associated data, cipher IV and payload into the
 * ce_in DMA channel; the ce_out channel receives the payload while a
 * throw-away area absorbs the assoc-data/IV bytes the engine passes
 * through.  Both directions are padded to a 16-byte ADM block.
 * Returns 0 on success or a negative errno; on failure every DMA
 * mapping made here is undone before returning.
 */
int qce_aead_req(void *handle, struct qce_req *q_req)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	struct aead_request *areq = (struct aead_request *) q_req->areq;
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	uint32_t ivsize = crypto_aead_ivsize(aead);
	uint32_t totallen;
	uint32_t pad_len;
	uint32_t authsize = crypto_aead_authsize(aead);
	int rc = 0;

	q_req->ivsize = ivsize;
	/* on decrypt, cryptlen includes the auth tag - exclude it */
	if (q_req->dir == QCE_ENCRYPT)
		q_req->cryptlen = areq->cryptlen;
	else
		q_req->cryptlen = areq->cryptlen - authsize;

	totallen = q_req->cryptlen + ivsize + areq->assoclen;
	pad_len = ALIGN(totallen, ADM_CE_BLOCK_SIZE) - totallen;

	_chain_buffer_in_init(pce_dev);
	_chain_buffer_out_init(pce_dev);

	/* zero so the error path knows which mappings exist */
	pce_dev->assoc_nents = 0;
	pce_dev->phy_iv_in = 0;
	pce_dev->src_nents = 0;
	pce_dev->dst_nents = 0;

	/* associated data input */
	pce_dev->assoc_nents = count_sg(areq->assoc, areq->assoclen);
	dma_map_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
					 DMA_TO_DEVICE);
	if (_chain_sg_buffer_in(pce_dev, areq->assoc, areq->assoclen) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* cipher iv for input */
	pce_dev->phy_iv_in = dma_map_single(pce_dev->pdev, q_req->iv,
			ivsize, DMA_TO_DEVICE);
	if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_iv_in, ivsize) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* for output, ignore associated data and cipher iv */
	if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_out_ignore,
				ivsize + areq->assoclen) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* cipher input */
	pce_dev->src_nents = count_sg(areq->src, q_req->cryptlen);
	dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
							DMA_TO_DEVICE);
	if (_chain_sg_buffer_in(pce_dev, areq->src, q_req->cryptlen) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* cipher output; in-place requests reuse the src mapping */
	if (areq->src != areq->dst) {
		pce_dev->dst_nents = count_sg(areq->dst, q_req->cryptlen);
		dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
				DMA_FROM_DEVICE);
	};
	if (_chain_sg_buffer_out(pce_dev, areq->dst, q_req->cryptlen) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* pad data to the 16-byte ADM block boundary on both channels */
	if (pad_len) {
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
						pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
						pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* finalize the ce_in and ce_out channels command lists */
	_ce_in_final(pce_dev, 1, ALIGN(totallen, ADM_CE_BLOCK_SIZE));
	_ce_out_final(pce_dev, 2, ALIGN(totallen, ADM_CE_BLOCK_SIZE));

	/* set up crypto device */
	rc = _ce_setup(pce_dev, q_req, totallen, ivsize + areq->assoclen);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = q_req->areq;
	pce_dev->qce_cb = q_req->qce_cb;

	pce_dev->chan_ce_in_cmd->complete_func = _aead_ce_in_call_back;
	pce_dev->chan_ce_out_cmd->complete_func = _aead_ce_out_call_back;

	rc = _qce_start_dma(pce_dev, true, true);
	if (rc == 0)
		return 0;
bad:
	/* unwind only the mappings that were actually made */
	if (pce_dev->assoc_nents) {
		dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
				DMA_TO_DEVICE);
	}
	if (pce_dev->phy_iv_in) {
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
				ivsize, DMA_TO_DEVICE);
	}
	if (pce_dev->src_nents) {
		dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
				(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
								DMA_TO_DEVICE);
	}
	if (pce_dev->dst_nents) {
		dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
				DMA_FROM_DEVICE);
	}
	return rc;
}
EXPORT_SYMBOL(qce_aead_req);
2041
/*
 * Submit a block-cipher request to the crypto engine.  Chains the
 * source scatterlist into ce_in and the destination into ce_out,
 * pads both to a 16-byte ADM block, programs the CE registers and
 * starts both DMA channels.  c_req->use_pmem == 1 selects the pmem
 * mapping path and the pmem completion callbacks.
 * Returns 0 on success or a negative errno; on failure the non-pmem
 * DMA mappings made here are undone.
 */
int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
{
	int rc = 0;
	struct qce_device *pce_dev = (struct qce_device *) handle;
	struct ablkcipher_request *areq = (struct ablkcipher_request *)
						c_req->areq;

	uint32_t pad_len = ALIGN(areq->nbytes, ADM_CE_BLOCK_SIZE)
						- areq->nbytes;

	_chain_buffer_in_init(pce_dev);
	_chain_buffer_out_init(pce_dev);

	pce_dev->src_nents = 0;
	pce_dev->dst_nents = 0;
	/* cipher input */
	pce_dev->src_nents = count_sg(areq->src, areq->nbytes);

	if (c_req->use_pmem != 1)
		dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
								DMA_TO_DEVICE);
	else
		dma_map_pmem_sg(&c_req->pmem->src[0], pce_dev->src_nents,
								areq->src);

	if (_chain_sg_buffer_in(pce_dev, areq->src, areq->nbytes) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* cipher output; in-place requests reuse the src mapping */
	if (areq->src != areq->dst) {
		pce_dev->dst_nents = count_sg(areq->dst, areq->nbytes);
		if (c_req->use_pmem != 1)
			dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
							DMA_FROM_DEVICE);
		else
			dma_map_pmem_sg(&c_req->pmem->dst[0],
					pce_dev->dst_nents, areq->dst);
	};
	if (_chain_sg_buffer_out(pce_dev, areq->dst, areq->nbytes) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* pad data to the 16-byte ADM block boundary on both channels */
	if (pad_len) {
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
						pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
						pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* finalize the ce_in and ce_out channels command lists */
	_ce_in_final(pce_dev, 1, areq->nbytes + pad_len);
	_ce_out_final(pce_dev, 1, areq->nbytes + pad_len);

#ifdef QCE_DEBUG
	_ce_in_dump(pce_dev);
	_ce_out_dump(pce_dev);
#endif
	/* set up crypto device */
	rc = _ce_setup(pce_dev, c_req, areq->nbytes, 0);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = areq;
	pce_dev->qce_cb = c_req->qce_cb;
	if (c_req->use_pmem == 1) {
		pce_dev->chan_ce_in_cmd->complete_func =
					_ablk_cipher_ce_in_call_back_pmem;
		pce_dev->chan_ce_out_cmd->complete_func =
					_ablk_cipher_ce_out_call_back_pmem;
	} else {
		pce_dev->chan_ce_in_cmd->complete_func =
					_ablk_cipher_ce_in_call_back;
		pce_dev->chan_ce_out_cmd->complete_func =
					_ablk_cipher_ce_out_call_back;
	}
	rc = _qce_start_dma(pce_dev, true, true);

	if (rc == 0)
		return 0;
bad:
	/* pmem mappings are not undone here - only regular sg mappings */
	if (c_req->use_pmem != 1) {
		if (pce_dev->dst_nents) {
			dma_unmap_sg(pce_dev->pdev, areq->dst,
			pce_dev->dst_nents, DMA_FROM_DEVICE);
		}
		if (pce_dev->src_nents) {
			dma_unmap_sg(pce_dev->pdev, areq->src,
					pce_dev->src_nents,
					(areq->src == areq->dst) ?
						DMA_BIDIRECTIONAL :
						DMA_TO_DEVICE);
		}
	}
	return rc;
}
EXPORT_SYMBOL(qce_ablk_cipher_req);
2150
2151int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
2152{
2153 struct qce_device *pce_dev = (struct qce_device *) handle;
2154 int rc;
2155 uint32_t pad_len = ALIGN(sreq->size, ADM_CE_BLOCK_SIZE) - sreq->size;
2156 struct ahash_request *areq = (struct ahash_request *)sreq->areq;
2157
2158 _chain_buffer_in_init(pce_dev);
2159 pce_dev->src_nents = count_sg(sreq->src, sreq->size);
2160 dma_map_sg(pce_dev->pdev, sreq->src, pce_dev->src_nents,
2161 DMA_TO_DEVICE);
2162
2163 if (_chain_sg_buffer_in(pce_dev, sreq->src, sreq->size) < 0) {
2164 rc = -ENOMEM;
2165 goto bad;
2166 }
2167
2168 if (pad_len) {
2169 if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
2170 pad_len) < 0) {
2171 rc = -ENOMEM;
2172 goto bad;
2173 }
2174 }
2175 _ce_in_final(pce_dev, 2, sreq->size + pad_len);
2176
2177#ifdef QCE_DEBUG
2178 _ce_in_dump(pce_dev);
2179#endif
2180
2181 rc = _sha_ce_setup(pce_dev, sreq);
2182
2183 if (rc < 0)
2184 goto bad;
2185
2186 pce_dev->areq = areq;
2187 pce_dev->qce_cb = sreq->qce_cb;
2188 pce_dev->chan_ce_in_cmd->complete_func = _sha_ce_in_call_back;
2189
2190 rc = _qce_start_dma(pce_dev, true, false);
2191
2192 if (rc == 0)
2193 return 0;
2194bad:
2195 if (pce_dev->src_nents) {
2196 dma_unmap_sg(pce_dev->pdev, sreq->src,
2197 pce_dev->src_nents, DMA_TO_DEVICE);
2198 }
2199
2200 return rc;
2201}
2202EXPORT_SYMBOL(qce_process_sha_req);
2203
2204/*
2205 * crypto engine open function.
2206 */
2207void *qce_open(struct platform_device *pdev, int *rc)
2208{
2209 struct qce_device *pce_dev;
2210 struct resource *resource;
2211 struct clk *ce_clk;
2212
2213 pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
2214 if (!pce_dev) {
2215 *rc = -ENOMEM;
2216 dev_err(&pdev->dev, "Can not allocate memory\n");
2217 return NULL;
2218 }
2219 pce_dev->pdev = &pdev->dev;
2220 ce_clk = clk_get(pce_dev->pdev, "ce_clk");
2221 if (IS_ERR(ce_clk)) {
Mona Hossaina8657d82011-07-11 16:30:08 -07002222 kfree(pce_dev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002223 *rc = PTR_ERR(ce_clk);
2224 return NULL;
2225 }
2226 pce_dev->ce_clk = ce_clk;
Mona Hossaina8657d82011-07-11 16:30:08 -07002227 *rc = clk_enable(pce_dev->ce_clk);
2228 if (*rc) {
2229 kfree(pce_dev);
2230 return NULL;
2231 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002232
2233 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2234 if (!resource) {
2235 *rc = -ENXIO;
2236 dev_err(pce_dev->pdev, "Missing MEM resource\n");
2237 goto err;
2238 };
2239 pce_dev->phy_iobase = resource->start;
2240 pce_dev->iobase = ioremap_nocache(resource->start,
2241 resource->end - resource->start + 1);
2242 if (!pce_dev->iobase) {
2243 *rc = -ENOMEM;
2244 dev_err(pce_dev->pdev, "Can not map io memory\n");
2245 goto err;
2246 }
2247
2248 pce_dev->chan_ce_in_cmd = kzalloc(sizeof(struct msm_dmov_cmd),
2249 GFP_KERNEL);
2250 pce_dev->chan_ce_out_cmd = kzalloc(sizeof(struct msm_dmov_cmd),
2251 GFP_KERNEL);
2252 if (pce_dev->chan_ce_in_cmd == NULL ||
2253 pce_dev->chan_ce_out_cmd == NULL) {
2254 dev_err(pce_dev->pdev, "Can not allocate memory\n");
2255 *rc = -ENOMEM;
2256 goto err;
2257 }
2258
2259 resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
2260 "crypto_channels");
2261 if (!resource) {
2262 *rc = -ENXIO;
2263 dev_err(pce_dev->pdev, "Missing DMA channel resource\n");
2264 goto err;
2265 };
2266 pce_dev->chan_ce_in = resource->start;
2267 pce_dev->chan_ce_out = resource->end;
2268 resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
2269 "crypto_crci_in");
2270 if (!resource) {
2271 *rc = -ENXIO;
2272 dev_err(pce_dev->pdev, "Missing DMA crci in resource\n");
2273 goto err;
2274 };
2275 pce_dev->crci_in = resource->start;
2276 resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
2277 "crypto_crci_out");
2278 if (!resource) {
2279 *rc = -ENXIO;
2280 dev_err(pce_dev->pdev, "Missing DMA crci out resource\n");
2281 goto err;
2282 };
2283 pce_dev->crci_out = resource->start;
2284 resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
2285 "crypto_crci_hash");
2286 if (!resource) {
2287 *rc = -ENXIO;
2288 dev_err(pce_dev->pdev, "Missing DMA crci hash resource\n");
2289 goto err;
2290 };
2291 pce_dev->crci_hash = resource->start;
2292 pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
2293 PAGE_SIZE, &pce_dev->coh_pmem, GFP_KERNEL);
2294
2295 if (pce_dev->coh_vmem == NULL) {
2296 *rc = -ENOMEM;
2297 dev_err(pce_dev->pdev, "Can not allocate coherent memory.\n");
2298 goto err;
2299 }
2300 _setup_cmd_template(pce_dev);
2301
2302 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
2303 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
2304
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002305 if (_init_ce_engine(pce_dev)) {
2306 *rc = -ENXIO;
2307 clk_disable(pce_dev->ce_clk);
2308 goto err;
2309 }
2310 *rc = 0;
2311 clk_disable(pce_dev->ce_clk);
2312
2313 pce_dev->err = 0;
2314
2315 return pce_dev;
2316err:
2317 if (pce_dev)
2318 qce_close(pce_dev);
2319 return NULL;
2320}
2321EXPORT_SYMBOL(qce_open);
2322
2323/*
2324 * crypto engine close function.
2325 */
2326int qce_close(void *handle)
2327{
2328 struct qce_device *pce_dev = (struct qce_device *) handle;
2329
2330 if (handle == NULL)
2331 return -ENODEV;
2332 if (pce_dev->iobase)
2333 iounmap(pce_dev->iobase);
2334
2335 if (pce_dev->coh_vmem)
2336 dma_free_coherent(pce_dev->pdev, PAGE_SIZE, pce_dev->coh_vmem,
2337 pce_dev->coh_pmem);
2338 kfree(pce_dev->chan_ce_in_cmd);
2339 kfree(pce_dev->chan_ce_out_cmd);
2340
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002341 clk_put(pce_dev->ce_clk);
Mona Hossain451cf982011-07-13 11:48:14 -07002342 kfree(handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002343 return 0;
2344}
2345EXPORT_SYMBOL(qce_close);
2346
2347int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
2348{
2349 struct qce_device *pce_dev = (struct qce_device *) handle;
2350
2351 if (ce_support == NULL)
2352 return -EINVAL;
2353
2354 if (pce_dev->hmac == 1)
2355 ce_support->sha1_hmac_20 = true;
2356 else
2357 ce_support->sha1_hmac_20 = false;
2358 ce_support->sha1_hmac = false;
2359 ce_support->sha256_hmac = false;
2360 ce_support->sha_hmac = false;
2361 ce_support->cmac = false;
2362 ce_support->aes_key_192 = true;
2363 ce_support->aes_xts = false;
2364 ce_support->aes_ccm = false;
2365 ce_support->ota = pce_dev->ota;
2366 return 0;
2367}
2368EXPORT_SYMBOL(qce_hw_support);
2369
/*
 * Queue a single-packet OTA F8 (ciphering) request.
 *
 * Three data layouts are handled:
 *   - key-stream mode (data_in == NULL): no input buffer; only the
 *     ce_out channel is used and pce_dev->phy_ota_src stays 0;
 *   - in-place (data_in == data_out): one bidirectional mapping is
 *     shared by input and output;
 *   - out-of-place: separate TO_DEVICE / FROM_DEVICE mappings.
 * Transfers are padded to the 16-byte CE block size.  Completion is
 * reported asynchronously through qce_cb with `cookie`.
 * Returns 0 on success or a negative errno (mappings undone).
 */
int qce_f8_req(void *handle, struct qce_f8_req *req,
			void *cookie, qce_comp_func_ptr_t qce_cb)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	bool key_stream_mode;
	dma_addr_t dst;
	int rc;
	uint32_t pad_len = ALIGN(req->data_len, ADM_CE_BLOCK_SIZE) -
						req->data_len;

	_chain_buffer_in_init(pce_dev);
	_chain_buffer_out_init(pce_dev);

	/* NULL input means the caller only wants the raw key stream */
	key_stream_mode = (req->data_in == NULL);

	/* F8 cipher input */
	if (key_stream_mode)
		pce_dev->phy_ota_src = 0;
	else {
		pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev,
					req->data_in, req->data_len,
					(req->data_in == req->data_out) ?
					DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src,
				req->data_len) < 0) {
			/* dst not mapped yet; keep the unwind from
			 * touching a stale phy_ota_dst value */
			pce_dev->phy_ota_dst = 0;
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* F8 cipher output */
	if (req->data_in != req->data_out) {
		dst = dma_map_single(pce_dev->pdev, req->data_out,
				req->data_len, DMA_FROM_DEVICE);
		pce_dev->phy_ota_dst = dst;
	} else {
		/* in-place: reuse the bidirectional input mapping;
		 * phy_ota_dst == 0 tells the unwind not to double-unmap */
		dst = pce_dev->phy_ota_src;
		pce_dev->phy_ota_dst = 0;
	}
	if (_chain_pm_buffer_out(pce_dev, dst, req->data_len) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	pce_dev->ota_size = req->data_len;

	/* pad data */
	if (pad_len) {
		if (!key_stream_mode && _chain_pm_buffer_in(pce_dev,
				pce_dev->phy_ce_pad, pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
				pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* finalize the ce_in and ce_out channels command lists */
	if (!key_stream_mode)
		_ce_in_final(pce_dev, 1, req->data_len + pad_len);
	_ce_out_final(pce_dev, 1, req->data_len + pad_len);

	/* set up crypto device */
	rc = _ce_f8_setup(pce_dev, req, key_stream_mode, 1, 0, req->data_len);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = cookie;
	pce_dev->qce_cb = qce_cb;

	if (!key_stream_mode)
		pce_dev->chan_ce_in_cmd->complete_func = _f8_ce_in_call_back;

	pce_dev->chan_ce_out_cmd->complete_func = _f8_ce_out_call_back;

	/* key-stream mode uses only the output channel */
	rc = _qce_start_dma(pce_dev, !(key_stream_mode), true);
	if (rc == 0)
		return 0;
bad:
	if (pce_dev->phy_ota_dst != 0)
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst,
				req->data_len, DMA_FROM_DEVICE);
	if (pce_dev->phy_ota_src != 0)
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
				req->data_len,
				(req->data_in == req->data_out) ?
				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	return rc;
}
EXPORT_SYMBOL(qce_f8_req);
2465
2466int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq,
2467 void *cookie, qce_comp_func_ptr_t qce_cb)
2468{
2469 struct qce_device *pce_dev = (struct qce_device *) handle;
2470 uint16_t num_pkt = mreq->num_pkt;
2471 uint16_t cipher_start = mreq->cipher_start;
2472 uint16_t cipher_size = mreq->cipher_size;
2473 struct qce_f8_req *req = &mreq->qce_f8_req;
2474 uint32_t total;
2475 uint32_t pad_len;
2476 dma_addr_t dst = 0;
2477 int rc = 0;
2478
2479 total = num_pkt * req->data_len;
2480 pad_len = ALIGN(total, ADM_CE_BLOCK_SIZE) - total;
2481
2482 _chain_buffer_in_init(pce_dev);
2483 _chain_buffer_out_init(pce_dev);
2484
2485 /* F8 cipher input */
2486 pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev,
2487 req->data_in, total,
2488 (req->data_in == req->data_out) ?
2489 DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
2490 if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src,
2491 total) < 0) {
2492 pce_dev->phy_ota_dst = 0;
2493 rc = -ENOMEM;
2494 goto bad;
2495 }
2496 /* F8 cipher output */
2497 if (req->data_in != req->data_out) {
2498 dst = dma_map_single(pce_dev->pdev, req->data_out, total,
2499 DMA_FROM_DEVICE);
2500 pce_dev->phy_ota_dst = dst;
2501 } else {
2502 dst = pce_dev->phy_ota_src;
2503 pce_dev->phy_ota_dst = 0;
2504 }
2505 if (_chain_pm_buffer_out(pce_dev, dst, total) < 0) {
2506 rc = -ENOMEM;
2507 goto bad;
2508 }
2509
2510 pce_dev->ota_size = total;
2511
2512 /* pad data */
2513 if (pad_len) {
2514 if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
2515 pad_len) < 0) {
2516 rc = -ENOMEM;
2517 goto bad;
2518 }
2519 if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
2520 pad_len) < 0) {
2521 rc = -ENOMEM;
2522 goto bad;
2523 }
2524 }
2525
2526 /* finalize the ce_in and ce_out channels command lists */
2527 _ce_in_final(pce_dev, 1, total + pad_len);
2528 _ce_out_final(pce_dev, 1, total + pad_len);
2529
2530
2531 /* set up crypto device */
2532 rc = _ce_f8_setup(pce_dev, req, false, num_pkt, cipher_start,
2533 cipher_size);
2534 if (rc)
2535 goto bad ;
2536
2537 /* setup for callback, and issue command to adm */
2538 pce_dev->areq = cookie;
2539 pce_dev->qce_cb = qce_cb;
2540
2541 pce_dev->chan_ce_in_cmd->complete_func = _f8_ce_in_call_back;
2542 pce_dev->chan_ce_out_cmd->complete_func = _f8_ce_out_call_back;
2543
2544 rc = _qce_start_dma(pce_dev, true, true);
2545 if (rc == 0)
2546 return 0;
2547bad:
2548 if (pce_dev->phy_ota_dst)
2549 dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst, total,
2550 DMA_FROM_DEVICE);
2551 dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src, total,
2552 (req->data_in == req->data_out) ?
2553 DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
2554 return rc;
2555}
2556EXPORT_SYMBOL(qce_f8_multi_pkt_req);
2557
2558int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie,
2559 qce_comp_func_ptr_t qce_cb)
2560{
2561 struct qce_device *pce_dev = (struct qce_device *) handle;
2562 int rc;
2563 uint32_t pad_len = ALIGN(req->msize, ADM_CE_BLOCK_SIZE) - req->msize;
2564
2565 pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev, req->message,
2566 req->msize, DMA_TO_DEVICE);
2567
2568 _chain_buffer_in_init(pce_dev);
2569 rc = _chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src, req->msize);
2570 if (rc < 0) {
2571 rc = -ENOMEM;
2572 goto bad;
2573 }
2574
2575 pce_dev->ota_size = req->msize;
2576 if (pad_len) {
2577 rc = _chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
2578 pad_len);
2579 if (rc < 0) {
2580 rc = -ENOMEM;
2581 goto bad;
2582 }
2583 }
2584 _ce_in_final(pce_dev, 2, req->msize + pad_len);
2585 rc = _ce_f9_setup(pce_dev, req);
2586 if (rc < 0)
2587 goto bad;
2588
2589 /* setup for callback, and issue command to adm */
2590 pce_dev->areq = cookie;
2591 pce_dev->qce_cb = qce_cb;
2592
2593 pce_dev->chan_ce_in_cmd->complete_func = _f9_ce_in_call_back;
2594
2595 rc = _qce_start_dma(pce_dev, true, false);
2596 if (rc == 0)
2597 return 0;
2598bad:
2599 dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
2600 req->msize, DMA_TO_DEVICE);
2601 return rc;
2602}
2603EXPORT_SYMBOL(qce_f9_req);
2604
/* Module metadata reported through modinfo. */
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
MODULE_DESCRIPTION("Crypto Engine driver");
MODULE_VERSION("1.13");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002609