blob: d1f564bc280bf1b26f9650b0a27fc86ef2631a09 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Qualcomm Crypto Engine driver.
2 *
3 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 and
7 * only version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14#include <linux/types.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/mod_devicetable.h>
18#include <linux/device.h>
19#include <linux/clk.h>
20#include <linux/err.h>
21#include <linux/dma-mapping.h>
22#include <linux/io.h>
23#include <linux/platform_device.h>
24#include <linux/spinlock.h>
25#include <linux/delay.h>
26#include <linux/crypto.h>
27#include <crypto/hash.h>
28#include <crypto/sha.h>
Mona Hossain5c8ea1f2011-07-28 15:11:29 -070029#include <linux/qcedev.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070030#include <linux/qcota.h>
31#include <mach/dma.h>
32
Mona Hossain5c8ea1f2011-07-28 15:11:29 -070033#include "qce.h"
34#include "qcryptohw_30.h"
35#include "qce_ota.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070036
/* ADM definitions */
/* Use 1U: left-shifting signed 1 into bit 31 is undefined behavior in C. */
#define LI_SG_CMD  (1U << 31)	/* last index in the scatter gather cmd */
/* Arguments are parenthesized so operator expressions may be passed safely. */
#define SRC_INDEX_SG_CMD(index) (((index) & 0x3fff) << 16)
#define DST_INDEX_SG_CMD(index) ((index) & 0x3fff)
#define ADM_DESC_LAST  (1U << 31)	/* marks the final dmov_desc in a list */

/* Data xfer between DM and CE in blocks of 16 bytes */
#define ADM_CE_BLOCK_SIZE 16

/* Data xfer between DM and CE in blocks of 64 bytes */
#define ADM_SHA_BLOCK_SIZE  64

#define ADM_DESC_LENGTH_MASK 0xffff
#define ADM_DESC_LENGTH(x)  ((x) & ADM_DESC_LENGTH_MASK)
51
/*
 * One ADM scatter/gather descriptor.
 * The low ADM_DESC_LENGTH_MASK bits of len are the byte count; the
 * remaining high bits carry ADM flags (e.g. ADM_DESC_LAST).
 */
struct dmov_desc {
	uint32_t addr;	/* physical (bus) address of the data chunk */
	uint32_t len;	/* byte count plus ADM flag bits */
};
56
/* ADM channel result value treated as fully successful — NOTE(review):
 * presumably matches the MSM data-mover status encoding; confirm against
 * mach/dma.h */
#define ADM_STATUS_OK 0x80000002

/* Misc definitions */

/* QCE max number of descriptor in a descriptor list */
#define QCE_MAX_NUM_DESC    128
/* State of DM channel */
enum qce_chan_st_enum {
	QCE_CHAN_STATE_IDLE = 0,	/* no transfer outstanding */
	QCE_CHAN_STATE_IN_PROG = 1,	/* DMA issued, completion pending */
	QCE_CHAN_STATE_COMP = 2,	/* transfer has completed */
	QCE_CHAN_STATE_LAST		/* number of states (sentinel) */
};
71
/*
 * CE HW device structure.
 * Each engine has an instance of the structure.
 * Each engine can only handle one crypto operation at one time. It is up to
 * the sw above to ensure single threading of operation on an engine.
 */
struct qce_device {
	struct device *pdev;        /* Handle to platform_device structure */
	unsigned char *coh_vmem;    /* Allocated coherent virtual memory */
	dma_addr_t coh_pmem;	    /* Allocated coherent physical memory */
	void __iomem *iobase;	    /* Virtual io base of CE HW  */
	unsigned int phy_iobase;    /* Physical io base of CE HW    */
	struct clk *ce_clk;	    /* Handle to CE clk */
	unsigned int crci_in;	      /* CRCI for CE DM IN Channel   */
	unsigned int crci_out;	      /* CRCI for CE DM OUT Channel   */
	unsigned int crci_hash;	      /* CRCI for CE HASH   */
	unsigned int chan_ce_in;      /* ADM channel used for CE input
					* and auth result if authentication
					* only operation. */
	unsigned int chan_ce_out;     /* ADM channel used for CE output,
					and icv for esp */

	/* ADM command-pointer lists and command lists for each direction,
	 * with their DMA (physical) addresses.  NOTE(review): presumably
	 * carved out of coh_vmem by setup code outside this chunk — confirm.
	 */
	unsigned int *cmd_pointer_list_ce_in;
	dma_addr_t  phy_cmd_pointer_list_ce_in;

	unsigned int *cmd_pointer_list_ce_out;
	dma_addr_t  phy_cmd_pointer_list_ce_out;

	unsigned char *cmd_list_ce_in;
	dma_addr_t  phy_cmd_list_ce_in;

	unsigned char *cmd_list_ce_out;
	dma_addr_t  phy_cmd_list_ce_out;

	/* scatter/gather descriptor arrays (struct dmov_desc) per
	 * direction, each QCE_MAX_NUM_DESC entries at most */
	struct dmov_desc *ce_out_src_desc;
	dma_addr_t  phy_ce_out_src_desc;

	struct dmov_desc *ce_out_dst_desc;
	dma_addr_t  phy_ce_out_dst_desc;

	struct dmov_desc *ce_in_src_desc;
	dma_addr_t  phy_ce_in_src_desc;

	struct dmov_desc *ce_in_dst_desc;
	dma_addr_t  phy_ce_in_dst_desc;

	/* scratch buffer for output bytes the caller does not want */
	unsigned char *ce_out_ignore;
	dma_addr_t phy_ce_out_ignore;

	/* padding buffer to round transfers up to the CE block size */
	unsigned char *ce_pad;
	dma_addr_t phy_ce_pad;

	struct msm_dmov_cmd  *chan_ce_in_cmd;
	struct msm_dmov_cmd  *chan_ce_out_cmd;

	uint32_t ce_out_ignore_size;

	/* index of the descriptor currently being filled in each array */
	int ce_out_dst_desc_index;
	int ce_in_src_desc_index;

	enum qce_chan_st_enum chan_ce_in_state;		/* chan ce_in state */
	enum qce_chan_st_enum chan_ce_out_state;	/* chan ce_out state */

	int chan_ce_in_status;		/* chan ce_in status      */
	int chan_ce_out_status;		/* chan ce_out status */

	/* digest/auth result buffer passed back through qce_cb */
	unsigned char *dig_result;
	dma_addr_t phy_dig_result;

	/* cached aes key */
	uint32_t aeskey[AES256_KEY_SIZE/sizeof(uint32_t)];

	uint32_t aes_key_size;		/* cached aes key size in bytes */
	int fastaes;			/* ce supports fast aes */
	int hmac;			/* ce support hmac-sha1 */
	bool ota;			/* ce support ota */

	qce_comp_func_ptr_t qce_cb;	/* qce callback function pointer */

	/* scatterlist entry counts for the request being processed */
	int assoc_nents;
	int src_nents;
	int dst_nents;

	void *areq;			/* current crypto API request */
	enum qce_cipher_mode_enum mode;	/* cipher mode of current request */

	dma_addr_t phy_iv_in;		/* mapped input IV (AEAD path) */
	dma_addr_t phy_ota_src;
	dma_addr_t phy_ota_dst;
	unsigned int ota_size;
	int err;			/* count of CE error interrupts seen */
};
166
/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
static uint32_t  _std_init_vector_sha1[] =   {
	0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
};
/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint32_t _std_init_vector_sha256[] = {
	0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
	0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
};
176
/* Source: FIPS 197, Figure 7. S-box: substitution values for the byte xy */
static const uint32_t _s_box[256] = {
	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,

	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,

	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,

	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,

	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,

	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,

	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,

	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,

	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,

	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,

	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,

	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,

	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,

	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,

	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,

	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 };


/*
 * AES key expansion, per FIPS 197 Sec 5.2 (Key Expansion).
 *
 * keysize is in bits (128/192/256; anything else is treated as 128).
 * AES_KEY holds Nk big-endian words (as produced by
 * _byte_stream_to_net_words); AES_RND_KEY receives the full schedule of
 * 4 * (Nr + 1) round-key words.
 */
static void _aes_expand_key_schedule(uint32_t keysize, uint32_t *AES_KEY,
		uint32_t *AES_RND_KEY)
{
	/*
	 * Round constants Rcon[j] (FIPS 197 Sec 5.2); entry j is consumed
	 * the (j+1)-th time i is a multiple of Nk, i.e. at i == (j+1)*Nk.
	 */
	static const uint32_t rcon_tab[10] = {
		0x01000000, 0x02000000, 0x04000000, 0x08000000,
		0x10000000, 0x20000000, 0x40000000, 0x80000000,
		0x1b000000, 0x36000000,
	};
	uint32_t Nk;		/* key length in 32-bit words */
	uint32_t Nr;		/* number of rounds */
	uint32_t i;
	uint32_t total;

	switch (keysize) {
	case 192:
		Nk = 6;
		Nr = 12;
		break;
	case 256:
		Nk = 8;
		Nr = 14;
		break;
	case 128:
	default:		/* default to AES128 */
		Nk = 4;
		Nr = 10;
		break;
	}

	/* the first Nk words are the key itself */
	for (i = 0; i < Nk; i++)
		AES_RND_KEY[i] = AES_KEY[i];

	total = 4 * (Nr + 1);
	for (i = Nk; i < total; i++) {
		uint32_t temp = AES_RND_KEY[i - 1];

		if ((i % Nk) == 0) {
			/* RotWord: rotate left by one byte */
			temp = (temp << 8) | (temp >> 24);
			/* SubWord: S-box substitution of each byte */
			temp = _s_box[temp & 0xff] |
				(_s_box[(temp >> 8) & 0xff] << 8) |
				(_s_box[(temp >> 16) & 0xff] << 16) |
				(_s_box[(temp >> 24) & 0xff] << 24);
			temp ^= rcon_tab[i / Nk - 1];
		} else if ((Nk > 6) && ((i % Nk) == 4)) {
			/* AES-256 only: extra SubWord on every 8th word */
			temp = _s_box[temp & 0xff] |
				(_s_box[(temp >> 8) & 0xff] << 8) |
				(_s_box[(temp >> 16) & 0xff] << 16) |
				(_s_box[(temp >> 24) & 0xff] << 24);
		}

		AES_RND_KEY[i] = AES_RND_KEY[i - Nk] ^ temp;
	}
}
414
/*
 * Pack a byte stream into big-endian (network order) 32-bit words.
 * A trailing partial word is left-justified; its unused low bytes are
 * written as zero.  Writes ceil(len / 4) words through iv (none if len
 * is 0).
 */
static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned int whole = len / sizeof(uint32_t);
	unsigned int rem = len % sizeof(uint32_t);
	unsigned int k;

	while (whole--) {
		*iv++ = ((uint32_t)b[0] << 24) |
			((uint32_t)b[1] << 16) |
			((uint32_t)b[2] << 8) |
			(uint32_t)b[3];
		b += sizeof(uint32_t);
	}

	if (rem) {
		uint32_t w = 0;

		/* top `rem` bytes of the final word; low bytes stay zero */
		for (k = 0; k < rem; k++)
			w |= (uint32_t)b[k] << (24 - 8 * k);
		*iv = w;
	}
}
442
/*
 * Unpack big-endian (network order) 32-bit words into a byte stream.
 * Exactly len bytes are written; a trailing partial word contributes
 * only its top len%4 bytes.
 */
static void _net_words_to_byte_stream(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned int whole = len / sizeof(uint32_t);
	unsigned int rem = len % sizeof(uint32_t);
	unsigned int k;

	while (whole--) {
		uint32_t w = *iv++;

		b[0] = (unsigned char)(w >> 24);
		b[1] = (unsigned char)(w >> 16);
		b[2] = (unsigned char)(w >> 8);
		b[3] = (unsigned char)w;
		b += sizeof(uint32_t);
	}

	/* partial trailing word: emit only its most-significant bytes */
	for (k = 0; k < rem; k++)
		b[k] = (unsigned char)(*iv >> (24 - 8 * k));
}
467
468static int count_sg(struct scatterlist *sg, int nbytes)
469{
470 int i;
471
472 for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
473 nbytes -= sg->length;
474 return i;
475}
476
477static int dma_map_pmem_sg(struct buf_info *pmem, unsigned entries,
478 struct scatterlist *sg)
479{
480 int i = 0;
481 for (i = 0; i < entries; i++) {
482
483 sg->dma_address = (dma_addr_t)pmem->offset;
484 sg++;
485 pmem++;
486 }
487 return 0;
488}
489
490static int _probe_ce_engine(struct qce_device *pce_dev)
491{
492 unsigned int val;
493 unsigned int rev;
494 unsigned int eng_availability; /* engine available functions */
495
496 val = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
497 if ((val & 0xfffffff) != 0x0200004) {
498 dev_err(pce_dev->pdev,
499 "unknown Qualcomm crypto device at 0x%x 0x%x\n",
500 pce_dev->phy_iobase, val);
501 return -EIO;
502 };
503 rev = (val & CRYPTO_CORE_REV_MASK) >> CRYPTO_CORE_REV;
504 if (rev == 0x2) {
505 dev_info(pce_dev->pdev,
506 "Qualcomm Crypto 3e device found at 0x%x\n",
507 pce_dev->phy_iobase);
508 } else if (rev == 0x1) {
509 dev_info(pce_dev->pdev,
510 "Qualcomm Crypto 3 device found at 0x%x\n",
511 pce_dev->phy_iobase);
512 } else if (rev == 0x0) {
513 dev_info(pce_dev->pdev,
514 "Qualcomm Crypto 2 device found at 0x%x\n",
515 pce_dev->phy_iobase);
516 } else {
517 dev_err(pce_dev->pdev,
518 "unknown Qualcomm crypto device at 0x%x\n",
519 pce_dev->phy_iobase);
520 return -EIO;
521 }
522
523 eng_availability = readl_relaxed(pce_dev->iobase +
524 CRYPTO_ENGINES_AVAIL);
525
526 if (((eng_availability & CRYPTO_AES_SEL_MASK) >> CRYPTO_AES_SEL)
527 == CRYPTO_AES_SEL_FAST)
528 pce_dev->fastaes = 1;
529 else
530 pce_dev->fastaes = 0;
531
532 if (eng_availability & (1 << CRYPTO_HMAC_SEL))
533 pce_dev->hmac = 1;
534 else
535 pce_dev->hmac = 0;
536
537 if ((eng_availability & (1 << CRYPTO_F9_SEL)) &&
538 (eng_availability & (1 << CRYPTO_F8_SEL)))
539 pce_dev->ota = true;
540 else
541 pce_dev->ota = false;
542
543 pce_dev->aes_key_size = 0;
544
545 return 0;
546};
547
/*
 * Reset and reconfigure the crypto engine, then re-probe it.
 * The reset write, barrier, config write, probe, and read-back check
 * must happen in exactly this order.  Returns 0 on success, -EIO if
 * the device does not respond as expected.
 */
static int _init_ce_engine(struct qce_device *pce_dev)
{
	unsigned int val;

	/* reset qce */
	writel_relaxed(1 << CRYPTO_SW_RST, pce_dev->iobase + CRYPTO_CONFIG_REG);

	/* Ensure previous instruction (write to reset bit)
	 * was completed.
	 */
	mb();
	/* configure ce: mask all CE interrupts (driver polls/uses DM) */
	val = (1 << CRYPTO_MASK_DOUT_INTR) | (1 << CRYPTO_MASK_DIN_INTR) |
			(1 << CRYPTO_MASK_AUTH_DONE_INTR) |
			(1 << CRYPTO_MASK_ERR_INTR);
	writel_relaxed(val, pce_dev->iobase + CRYPTO_CONFIG_REG);

	if (_probe_ce_engine(pce_dev) < 0)
		return -EIO;
	/* read back the config as a sanity check that the device is alive */
	if (readl_relaxed(pce_dev->iobase + CRYPTO_CONFIG_REG) != val) {
		dev_err(pce_dev->pdev,
				"unknown Qualcomm crypto device at 0x%x\n",
				pce_dev->phy_iobase);
		return -EIO;
	};
	return 0;
};
575
576static int _sha_ce_setup(struct qce_device *pce_dev, struct qce_sha_req *sreq)
577{
578 uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
579 uint32_t diglen;
580 int rc;
581 int i;
582 uint32_t cfg = 0;
583
584 /* if not the last, the size has to be on the block boundary */
585 if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
586 return -EIO;
587
588 switch (sreq->alg) {
589 case QCE_HASH_SHA1:
590 diglen = SHA1_DIGEST_SIZE;
591 break;
592 case QCE_HASH_SHA256:
593 diglen = SHA256_DIGEST_SIZE;
594 break;
595 default:
596 return -EINVAL;
597 }
598 /*
599 * write 20/32 bytes, 5/8 words into auth_iv
600 * for SHA1/SHA256
601 */
602
603 if (sreq->first_blk) {
604 if (sreq->alg == QCE_HASH_SHA1) {
605 for (i = 0; i < 5; i++)
606 auth32[i] = _std_init_vector_sha1[i];
607 } else {
608 for (i = 0; i < 8; i++)
609 auth32[i] = _std_init_vector_sha256[i];
610 }
611 } else
612 _byte_stream_to_net_words(auth32, sreq->digest, diglen);
613
614 rc = clk_enable(pce_dev->ce_clk);
615 if (rc)
616 return rc;
617
618 writel_relaxed(auth32[0], pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
619 writel_relaxed(auth32[1], pce_dev->iobase + CRYPTO_AUTH_IV1_REG);
620 writel_relaxed(auth32[2], pce_dev->iobase + CRYPTO_AUTH_IV2_REG);
621 writel_relaxed(auth32[3], pce_dev->iobase + CRYPTO_AUTH_IV3_REG);
622 writel_relaxed(auth32[4], pce_dev->iobase + CRYPTO_AUTH_IV4_REG);
623
624 if (sreq->alg == QCE_HASH_SHA256) {
625 writel_relaxed(auth32[5], pce_dev->iobase +
626 CRYPTO_AUTH_IV5_REG);
627 writel_relaxed(auth32[6], pce_dev->iobase +
628 CRYPTO_AUTH_IV6_REG);
629 writel_relaxed(auth32[7], pce_dev->iobase +
630 CRYPTO_AUTH_IV7_REG);
631 }
632 /* write auth_bytecnt 0/1, start with 0 */
633 writel_relaxed(sreq->auth_data[0], pce_dev->iobase +
634 CRYPTO_AUTH_BYTECNT0_REG);
635 writel_relaxed(sreq->auth_data[1], pce_dev->iobase +
636 CRYPTO_AUTH_BYTECNT1_REG);
637
638 /* write auth_seg_cfg */
639 writel_relaxed(sreq->size << CRYPTO_AUTH_SEG_SIZE,
640 pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
641
642 /*
643 * write seg_cfg
644 */
645
646 if (sreq->alg == QCE_HASH_SHA1)
647 cfg |= (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE);
648 else
649 cfg = (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE);
650
651 if (sreq->first_blk)
652 cfg |= 1 << CRYPTO_FIRST;
653 if (sreq->last_blk)
654 cfg |= 1 << CRYPTO_LAST;
655 cfg |= CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG;
656 writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);
657
658 /* write seg_size */
659 writel_relaxed(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
660
661 /* issue go to crypto */
662 writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
663 /* Ensure previous instructions (setting the GO register)
664 * was completed before issuing a DMA transfer request
665 */
666 mb();
667
668 return 0;
669}
670
/*
 * Program the crypto engine for a cipher or AEAD request.
 * Sets up the HMAC-SHA1 auth context for AEAD, loads the cipher key
 * and IV for DES/3DES/AES, caches the AES key (writing round keys only
 * when the key changed), writes segment configuration, and starts the
 * engine via the GO register.  totallen is the total segment size;
 * coffset is the cipher offset within it.  The CE clock is left
 * enabled for the completion path to release.
 */
static int _ce_setup(struct qce_device *pce_dev, struct qce_req *q_req,
		uint32_t totallen, uint32_t coffset)
{
	uint32_t hmackey[HMAC_KEY_SIZE/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0};
	uint32_t enckey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0, 0, 0, 0};
	uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
			0, 0, 0, 0};
	uint32_t enck_size_in_word = q_req->encklen / sizeof(uint32_t);
	int aes_key_chg;
	int i, rc;
	uint32_t aes_round_key[CRYPTO_AES_RNDKEYS];
	uint32_t cfg;
	uint32_t ivsize = q_req->ivsize;

	rc = clk_enable(pce_dev->ce_clk);
	if (rc)
		return rc;

	cfg = (1 << CRYPTO_FIRST) | (1 << CRYPTO_LAST);
	if (q_req->op == QCE_REQ_AEAD) {

		/* do authentication setup */

		cfg |= (CRYPTO_AUTH_SIZE_HMAC_SHA1 << CRYPTO_AUTH_SIZE)|
				(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG);

		/* write sha1 init vector */
		writel_relaxed(_std_init_vector_sha1[0],
				pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
		writel_relaxed(_std_init_vector_sha1[1],
				pce_dev->iobase + CRYPTO_AUTH_IV1_REG);
		writel_relaxed(_std_init_vector_sha1[2],
				pce_dev->iobase + CRYPTO_AUTH_IV2_REG);
		writel_relaxed(_std_init_vector_sha1[3],
				pce_dev->iobase + CRYPTO_AUTH_IV3_REG);
		writel_relaxed(_std_init_vector_sha1[4],
				pce_dev->iobase + CRYPTO_AUTH_IV4_REG);
		/* write hmac key */
		_byte_stream_to_net_words(hmackey, q_req->authkey,
				q_req->authklen);
		writel_relaxed(hmackey[0], pce_dev->iobase +
				CRYPTO_AUTH_IV5_REG);
		writel_relaxed(hmackey[1], pce_dev->iobase +
				CRYPTO_AUTH_IV6_REG);
		writel_relaxed(hmackey[2], pce_dev->iobase +
				CRYPTO_AUTH_IV7_REG);
		writel_relaxed(hmackey[3], pce_dev->iobase +
				CRYPTO_AUTH_IV8_REG);
		writel_relaxed(hmackey[4], pce_dev->iobase +
				CRYPTO_AUTH_IV9_REG);
		/* fresh hash: zero the running byte counters */
		writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG);
		writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT1_REG);

		/* write auth_seg_cfg */
		writel_relaxed((totallen << CRYPTO_AUTH_SEG_SIZE) & 0xffff0000,
				pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);

	}

	_byte_stream_to_net_words(enckey32, q_req->enckey, q_req->encklen);

	switch (q_req->mode) {
	case QCE_MODE_ECB:
		cfg |= (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
		break;

	case QCE_MODE_CBC:
		cfg |= (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
		break;

	case QCE_MODE_CTR:
	default:
		cfg |= (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
		break;
	}
	/* remember the mode so the completion path knows whether to
	 * read the IV registers back */
	pce_dev->mode = q_req->mode;

	switch (q_req->alg) {
	case CIPHER_ALG_DES:
		if (q_req->mode != QCE_MODE_ECB) {
			/* ECB uses no IV; others get a 64-bit IV */
			_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
			writel_relaxed(enciv32[0], pce_dev->iobase +
					CRYPTO_CNTR0_IV0_REG);
			writel_relaxed(enciv32[1], pce_dev->iobase +
					CRYPTO_CNTR1_IV1_REG);
		}
		writel_relaxed(enckey32[0], pce_dev->iobase +
				CRYPTO_DES_KEY0_REG);
		writel_relaxed(enckey32[1], pce_dev->iobase +
				CRYPTO_DES_KEY1_REG);
		cfg |= ((CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
				(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
		break;

	case CIPHER_ALG_3DES:
		if (q_req->mode != QCE_MODE_ECB) {
			_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
			writel_relaxed(enciv32[0], pce_dev->iobase +
					CRYPTO_CNTR0_IV0_REG);
			writel_relaxed(enciv32[1], pce_dev->iobase +
					CRYPTO_CNTR1_IV1_REG);
		}
		/* 3DES takes three 64-bit keys in six word registers */
		writel_relaxed(enckey32[0], pce_dev->iobase +
				CRYPTO_DES_KEY0_REG);
		writel_relaxed(enckey32[1], pce_dev->iobase +
				CRYPTO_DES_KEY1_REG);
		writel_relaxed(enckey32[2], pce_dev->iobase +
				CRYPTO_DES_KEY2_REG);
		writel_relaxed(enckey32[3], pce_dev->iobase +
				CRYPTO_DES_KEY3_REG);
		writel_relaxed(enckey32[4], pce_dev->iobase +
				CRYPTO_DES_KEY4_REG);
		writel_relaxed(enckey32[5], pce_dev->iobase +
				CRYPTO_DES_KEY5_REG);
		cfg |= ((CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
				(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
		break;

	case CIPHER_ALG_AES:
	default:
		if (q_req->mode != QCE_MODE_ECB) {
			/* 128-bit IV / counter block */
			_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
			writel_relaxed(enciv32[0], pce_dev->iobase +
					CRYPTO_CNTR0_IV0_REG);
			writel_relaxed(enciv32[1], pce_dev->iobase +
					CRYPTO_CNTR1_IV1_REG);
			writel_relaxed(enciv32[2], pce_dev->iobase +
					CRYPTO_CNTR2_IV2_REG);
			writel_relaxed(enciv32[3], pce_dev->iobase +
					CRYPTO_CNTR3_IV3_REG);
		}
		/* set number of counter bits */
		writel_relaxed(0xffff, pce_dev->iobase + CRYPTO_CNTR_MASK_REG);

		if (q_req->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
			cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
					CRYPTO_ENCR_KEY_SZ);
			cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
		} else {
			switch (q_req->encklen) {
			case AES128_KEY_SIZE:
				cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
						CRYPTO_ENCR_KEY_SZ);
				break;
			case AES192_KEY_SIZE:
				cfg |= (CRYPTO_ENCR_KEY_SZ_AES192 <<
						CRYPTO_ENCR_KEY_SZ);
				break;
			case AES256_KEY_SIZE:
			default:
				cfg |= (CRYPTO_ENCR_KEY_SZ_AES256 <<
						CRYPTO_ENCR_KEY_SZ);

				/* check for null key. If null, use hw key*/
				for (i = 0; i < enck_size_in_word; i++) {
					if (enckey32[i] != 0)
						break;
				}
				if (i == enck_size_in_word)
					cfg |= 1 << CRYPTO_USE_HW_KEY;
				break;
			} /* end of switch (q_req->encklen) */

			cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
			/* reload round keys only if the key changed since
			 * the last request on this engine */
			if (pce_dev->aes_key_size != q_req->encklen)
				aes_key_chg = 1;
			else {
				for (i = 0; i < enck_size_in_word; i++) {
					if (enckey32[i] != pce_dev->aeskey[i])
						break;
				}
				aes_key_chg = (i == enck_size_in_word) ? 0 : 1;
			}

			if (aes_key_chg) {
				if (pce_dev->fastaes) {
					/* fast AES core expands the key in
					 * hardware; write the raw key */
					for (i = 0; i < enck_size_in_word;
							i++) {
						writel_relaxed(enckey32[i],
							pce_dev->iobase +
							CRYPTO_AES_RNDKEY0 +
							(i * sizeof(uint32_t)));
					}
				} else {
					/* size in bit */
					_aes_expand_key_schedule(
						q_req->encklen * 8,
						enckey32, aes_round_key);

					for (i = 0; i < CRYPTO_AES_RNDKEYS;
							i++) {
						writel_relaxed(aes_round_key[i],
							pce_dev->iobase +
							CRYPTO_AES_RNDKEY0 +
							(i * sizeof(uint32_t)));
					}
				}

				pce_dev->aes_key_size = q_req->encklen;
				for (i = 0; i < enck_size_in_word; i++)
					pce_dev->aeskey[i] = enckey32[i];
			} /*if (aes_key_chg) { */
		} /* else of if (q_req->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
		break;
	} /* end of switch (q_req->mode) */

	if (q_req->dir == QCE_ENCRYPT)
		cfg |= (1 << CRYPTO_AUTH_POS);
	cfg |= ((q_req->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;

	/* write encr seg cfg */
	writel_relaxed((q_req->cryptlen << CRYPTO_ENCR_SEG_SIZE) |
			(coffset & 0xffff),	/* cipher offset */
			pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);

	/* write seg cfg and size */
	writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);
	writel_relaxed(totallen, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	/* issue go to crypto */
	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
	/* Ensure previous instructions (setting the GO register)
	 * was completed before issuing a DMA transfer request
	 */
	mb();
	return 0;
};
900
/*
 * Completion handler for an AEAD request.
 * Unmaps all DMA buffers (dst, src, input IV, assoc), checks the CE
 * error status, reads back the output IV for non-ECB modes, disables
 * the CE clock, and invokes the client callback with the digest and IV.
 * Always returns 0; errors are reported through the callback status.
 */
static int _aead_complete(struct qce_device *pce_dev)
{
	struct aead_request *areq;
	struct crypto_aead *aead;
	uint32_t ivsize;
	uint32_t iv_out[4];
	unsigned char iv[4 * sizeof(uint32_t)];
	uint32_t status;

	areq = (struct aead_request *) pce_dev->areq;
	aead = crypto_aead_reqtfm(areq);
	ivsize = crypto_aead_ivsize(aead);

	/* dst was mapped separately only for out-of-place requests */
	if (areq->src != areq->dst) {
		dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
				DMA_FROM_DEVICE);
	}
	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
							DMA_TO_DEVICE);
	dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
			ivsize, DMA_TO_DEVICE);
	dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
			DMA_TO_DEVICE);

	/* check ce error status */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (status & (1 << CRYPTO_SW_ERR)) {
		pce_dev->err++;
		dev_err(pce_dev->pdev,
			"Qualcomm Crypto Error at 0x%x, status%x\n",
			pce_dev->phy_iobase, status);
		/* reset the engine so the next request starts clean */
		_init_ce_engine(pce_dev);
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, pce_dev->dig_result, NULL, -ENXIO);
		return 0;
	};

	/* get iv out */
	if (pce_dev->mode == QCE_MODE_ECB) {
		/* ECB produces no output IV */
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, pce_dev->dig_result, NULL,
				pce_dev->chan_ce_in_status |
				pce_dev->chan_ce_out_status);
	} else {

		iv_out[0] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR0_IV0_REG);
		iv_out[1] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR1_IV1_REG);
		iv_out[2] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR2_IV2_REG);
		iv_out[3] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR3_IV3_REG);

		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, pce_dev->dig_result, iv,
				pce_dev->chan_ce_in_status |
				pce_dev->chan_ce_out_status);
	};
	return 0;
};
964
/*
 * Completion handler for a hash (SHA-1/SHA-256) request.
 * Unmaps the source scatterlist, checks the CE error status, reads the
 * running byte counters back so the client can continue a multi-pass
 * hash, disables the CE clock, and invokes the client callback with
 * the digest and byte counts.
 */
static void _sha_complete(struct qce_device *pce_dev)
{

	struct ahash_request *areq;
	uint32_t auth_data[2];
	uint32_t status;

	areq = (struct ahash_request *) pce_dev->areq;
	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			DMA_TO_DEVICE);

	/* check ce error status */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (status & (1 << CRYPTO_SW_ERR)) {
		pce_dev->err++;
		dev_err(pce_dev->pdev,
			"Qualcomm Crypto Error at 0x%x, status%x\n",
			pce_dev->phy_iobase, status);
		/* reset the engine so the next request starts clean */
		_init_ce_engine(pce_dev);
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, pce_dev->dig_result, NULL, -ENXIO);
		return;
	};

	auth_data[0] = readl_relaxed(pce_dev->iobase +
				CRYPTO_AUTH_BYTECNT0_REG);
	auth_data[1] = readl_relaxed(pce_dev->iobase +
				CRYPTO_AUTH_BYTECNT1_REG);
	/* Ensure previous instruction (retriving byte count information)
	 * was completed before disabling the clk.
	 */
	mb();
	clk_disable(pce_dev->ce_clk);
	pce_dev->qce_cb(areq, pce_dev->dig_result, (unsigned char *)auth_data,
				pce_dev->chan_ce_in_status);
};
1001
/*
 * Completion handler for an ablkcipher request.
 * Unmaps the DMA buffers, checks the CE error status, reads back the
 * output IV for non-ECB modes, disables the CE clock, and invokes the
 * client callback.  Always returns 0; errors are reported through the
 * callback status argument.
 */
static int _ablk_cipher_complete(struct qce_device *pce_dev)
{
	struct ablkcipher_request *areq;
	uint32_t iv_out[4];
	unsigned char iv[4 * sizeof(uint32_t)];
	uint32_t status;

	areq = (struct ablkcipher_request *) pce_dev->areq;

	/* dst was mapped separately only for out-of-place requests */
	if (areq->src != areq->dst) {
		dma_unmap_sg(pce_dev->pdev, areq->dst,
			pce_dev->dst_nents, DMA_FROM_DEVICE);
	}
	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
		(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
						DMA_TO_DEVICE);

	/* check ce error status */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (status & (1 << CRYPTO_SW_ERR)) {
		pce_dev->err++;
		dev_err(pce_dev->pdev,
			"Qualcomm Crypto Error at 0x%x, status%x\n",
			pce_dev->phy_iobase, status);
		/* reset the engine so the next request starts clean */
		_init_ce_engine(pce_dev);
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, NULL, NULL, -ENXIO);
		return 0;
	};

	/* get iv out */
	if (pce_dev->mode == QCE_MODE_ECB) {
		/* ECB produces no output IV */
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status |
					pce_dev->chan_ce_out_status);
	} else {
		iv_out[0] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR0_IV0_REG);
		iv_out[1] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR1_IV1_REG);
		iv_out[2] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR2_IV2_REG);
		iv_out[3] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR3_IV3_REG);

		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status |
					pce_dev->chan_ce_out_status);
	}

	return 0;
};
1055
1056static int _ablk_cipher_use_pmem_complete(struct qce_device *pce_dev)
1057{
1058 struct ablkcipher_request *areq;
1059 uint32_t iv_out[4];
1060 unsigned char iv[4 * sizeof(uint32_t)];
1061 uint32_t status;
1062
1063 areq = (struct ablkcipher_request *) pce_dev->areq;
1064
1065 /* check ce error status */
1066 status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
1067 if (status & (1 << CRYPTO_SW_ERR)) {
1068 pce_dev->err++;
1069 dev_err(pce_dev->pdev,
1070 "Qualcomm Crypto Error at 0x%x, status%x\n",
1071 pce_dev->phy_iobase, status);
1072 _init_ce_engine(pce_dev);
1073 clk_disable(pce_dev->ce_clk);
1074 pce_dev->qce_cb(areq, NULL, NULL, -ENXIO);
1075 return 0;
1076 };
1077
1078 /* get iv out */
1079 if (pce_dev->mode == QCE_MODE_ECB) {
1080 clk_disable(pce_dev->ce_clk);
1081 pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status |
1082 pce_dev->chan_ce_out_status);
1083 } else {
1084 iv_out[0] = readl_relaxed(pce_dev->iobase +
1085 CRYPTO_CNTR0_IV0_REG);
1086 iv_out[1] = readl_relaxed(pce_dev->iobase +
1087 CRYPTO_CNTR1_IV1_REG);
1088 iv_out[2] = readl_relaxed(pce_dev->iobase +
1089 CRYPTO_CNTR2_IV2_REG);
1090 iv_out[3] = readl_relaxed(pce_dev->iobase +
1091 CRYPTO_CNTR3_IV3_REG);
1092
1093 _net_words_to_byte_stream(iv_out, iv, sizeof(iv));
1094 clk_disable(pce_dev->ce_clk);
1095 pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status |
1096 pce_dev->chan_ce_out_status);
1097 }
1098
1099 return 0;
1100};
1101
1102
1103
/*
 * Append nbytes from a dma-mapped scatterlist to the CE input
 * descriptor list, merging physically contiguous chunks into a single
 * dmov_desc.  Returns 0 on success, -ENOMEM if more than
 * QCE_MAX_NUM_DESC descriptors would be required.
 */
static int _chain_sg_buffer_in(struct qce_device *pce_dev,
		struct scatterlist *sg, unsigned int nbytes)
{
	unsigned int len;
	unsigned int dlen;
	struct dmov_desc *pdesc;

	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
	/*
	 * Two consective chunks may be handled by the old
	 * buffer descriptor.
	 */
	while (nbytes > 0) {
		len = min(nbytes, sg_dma_len(sg));
		/* current descriptor byte count, without the flag bits */
		dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
		nbytes -= len;
		if (dlen == 0) {
			/* descriptor is empty: start it at this chunk */
			pdesc->addr = sg_dma_address(sg);
			pdesc->len = len;
		} else if (sg_dma_address(sg) == (pdesc->addr + dlen))
			/* chunk is contiguous: extend current descriptor */
			pdesc->len = dlen + len;
		else {
			/* discontiguous: move on to a fresh descriptor */
			pce_dev->ce_in_src_desc_index++;
			if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
				return -ENOMEM;
			pdesc++;
			pdesc->len = len;
			pdesc->addr = sg_dma_address(sg);
		}
		if (nbytes > 0)
			sg = sg_next(sg);
	}
	return 0;
}
1138
1139static int _chain_pm_buffer_in(struct qce_device *pce_dev,
1140 unsigned int pmem, unsigned int nbytes)
1141{
1142 unsigned int dlen;
1143 struct dmov_desc *pdesc;
1144
1145 pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
1146 dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
1147 if (dlen == 0) {
1148 pdesc->addr = pmem;
1149 pdesc->len = nbytes;
1150 } else if (pmem == (pdesc->addr + dlen)) {
1151 pdesc->len = dlen + nbytes;
1152 } else {
1153 pce_dev->ce_in_src_desc_index++;
1154 if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
1155 return -ENOMEM;
1156 pdesc++;
1157 pdesc->len = nbytes;
1158 pdesc->addr = pmem;
1159 }
1160 return 0;
1161}
1162
1163static void _chain_buffer_in_init(struct qce_device *pce_dev)
1164{
1165 struct dmov_desc *pdesc;
1166
1167 pce_dev->ce_in_src_desc_index = 0;
1168 pdesc = pce_dev->ce_in_src_desc;
1169 pdesc->len = 0;
1170}
1171
1172static void _ce_in_final(struct qce_device *pce_dev, int ncmd, unsigned total)
1173{
1174 struct dmov_desc *pdesc;
1175 dmov_sg *pcmd;
1176
1177 pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
1178 pdesc->len |= ADM_DESC_LAST;
1179 pdesc = pce_dev->ce_in_dst_desc;
1180 pdesc->len = ADM_DESC_LAST | total;
1181
1182 pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in;
1183 if (ncmd == 1)
1184 pcmd->cmd |= CMD_LC;
1185 else {
1186 dmov_s *pscmd;
1187
1188 pcmd->cmd &= ~CMD_LC;
1189 pcmd++;
1190 pscmd = (dmov_s *)pcmd;
1191 pscmd->cmd |= CMD_LC;
1192 }
1193
1194#ifdef QCE_DEBUG
1195 dev_info(pce_dev->pdev, "_ce_in_final %d\n",
1196 pce_dev->ce_in_src_desc_index);
1197#endif
1198}
1199
#ifdef QCE_DEBUG
/* Debug: print every ce_in source descriptor plus the destination. */
static void _ce_in_dump(struct qce_device *pce_dev)
{
	struct dmov_desc *pdesc;
	int i;

	dev_info(pce_dev->pdev, "_ce_in_dump\n");
	for (i = 0; i <= pce_dev->ce_in_src_desc_index; i++) {
		pdesc = pce_dev->ce_in_src_desc + i;
		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
				pdesc->len);
	}
	pdesc = pce_dev->ce_in_dst_desc;
	dev_info(pce_dev->pdev, "dst - %x , %x\n", pdesc->addr,
			pdesc->len);
}

/* Debug: print every ce_out destination descriptor plus the source. */
static void _ce_out_dump(struct qce_device *pce_dev)
{
	struct dmov_desc *pdesc;
	int i;

	dev_info(pce_dev->pdev, "_ce_out_dump\n");
	for (i = 0; i <= pce_dev->ce_out_dst_desc_index; i++) {
		pdesc = pce_dev->ce_out_dst_desc + i;
		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
				pdesc->len);
	}
	pdesc = pce_dev->ce_out_src_desc;
	dev_info(pce_dev->pdev, "src - %x , %x\n", pdesc->addr,
			pdesc->len);
}
#endif
1233
1234static int _chain_sg_buffer_out(struct qce_device *pce_dev,
1235 struct scatterlist *sg, unsigned int nbytes)
1236{
1237 unsigned int len;
1238 unsigned int dlen;
1239 struct dmov_desc *pdesc;
1240
1241 pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
1242 /*
1243 * Two consective chunks may be handled by the old
1244 * buffer descriptor.
1245 */
1246 while (nbytes > 0) {
1247 len = min(nbytes, sg_dma_len(sg));
1248 dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
1249 nbytes -= len;
1250 if (dlen == 0) {
1251 pdesc->addr = sg_dma_address(sg);
1252 pdesc->len = len;
1253 } else if (sg_dma_address(sg) == (pdesc->addr + dlen)) {
1254 pdesc->len = dlen + len;
1255 } else {
1256 pce_dev->ce_out_dst_desc_index++;
1257 if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
1258 return -EIO;
1259 pdesc++;
1260 pdesc->len = len;
1261 pdesc->addr = sg_dma_address(sg);
1262 }
1263 if (nbytes > 0)
1264 sg = sg_next(sg);
1265 }
1266 return 0;
1267}
1268
1269static int _chain_pm_buffer_out(struct qce_device *pce_dev,
1270 unsigned int pmem, unsigned int nbytes)
1271{
1272 unsigned int dlen;
1273 struct dmov_desc *pdesc;
1274
1275 pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
1276 dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
1277
1278 if (dlen == 0) {
1279 pdesc->addr = pmem;
1280 pdesc->len = nbytes;
1281 } else if (pmem == (pdesc->addr + dlen)) {
1282 pdesc->len = dlen + nbytes;
1283 } else {
1284 pce_dev->ce_out_dst_desc_index++;
1285 if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
1286 return -EIO;
1287 pdesc++;
1288 pdesc->len = nbytes;
1289 pdesc->addr = pmem;
1290 }
1291 return 0;
1292};
1293
1294static void _chain_buffer_out_init(struct qce_device *pce_dev)
1295{
1296 struct dmov_desc *pdesc;
1297
1298 pce_dev->ce_out_dst_desc_index = 0;
1299 pdesc = pce_dev->ce_out_dst_desc;
1300 pdesc->len = 0;
1301};
1302
1303static void _ce_out_final(struct qce_device *pce_dev, int ncmd, unsigned total)
1304{
1305 struct dmov_desc *pdesc;
1306 dmov_sg *pcmd;
1307
1308 pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
1309 pdesc->len |= ADM_DESC_LAST;
1310 pdesc = pce_dev->ce_out_src_desc;
1311 pdesc->len = ADM_DESC_LAST | total;
1312 pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out;
1313 if (ncmd == 1)
1314 pcmd->cmd |= CMD_LC;
1315 else {
1316 dmov_s *pscmd;
1317
1318 pcmd->cmd &= ~CMD_LC;
1319 pcmd++;
1320 pscmd = (dmov_s *)pcmd;
1321 pscmd->cmd |= CMD_LC;
1322 }
1323#ifdef QCE_DEBUG
1324 dev_info(pce_dev->pdev, "_ce_out_final %d\n",
1325 pce_dev->ce_out_dst_desc_index);
1326#endif
1327
1328};
1329
1330static void _aead_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
1331 unsigned int result, struct msm_dmov_errdata *err)
1332{
1333 struct qce_device *pce_dev;
1334
1335 pce_dev = (struct qce_device *) cmd_ptr->user;
1336 if (result != ADM_STATUS_OK) {
1337 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1338 result);
1339 pce_dev->chan_ce_in_status = -1;
1340 } else
1341 pce_dev->chan_ce_in_status = 0;
1342
1343 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
1344 if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
1345 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1346 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1347
1348 /* done */
1349 _aead_complete(pce_dev);
1350 }
1351};
1352
1353static void _aead_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
1354 unsigned int result, struct msm_dmov_errdata *err)
1355{
1356 struct qce_device *pce_dev;
1357
1358 pce_dev = (struct qce_device *) cmd_ptr->user;
1359 if (result != ADM_STATUS_OK) {
1360 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1361 result);
1362 pce_dev->chan_ce_out_status = -1;
1363 } else {
1364 pce_dev->chan_ce_out_status = 0;
1365 };
1366
1367 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
1368 if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
1369 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1370 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1371
1372 /* done */
1373 _aead_complete(pce_dev);
1374 }
1375
1376};
1377
1378static void _sha_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
1379 unsigned int result, struct msm_dmov_errdata *err)
1380{
1381 struct qce_device *pce_dev;
1382
1383 pce_dev = (struct qce_device *) cmd_ptr->user;
1384 if (result != ADM_STATUS_OK) {
1385 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1386 result);
1387 pce_dev->chan_ce_in_status = -1;
1388 } else
1389 pce_dev->chan_ce_in_status = 0;
1390 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1391 _sha_complete(pce_dev);
1392};
1393
1394static void _ablk_cipher_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
1395 unsigned int result, struct msm_dmov_errdata *err)
1396{
1397 struct qce_device *pce_dev;
1398
1399 pce_dev = (struct qce_device *) cmd_ptr->user;
1400 if (result != ADM_STATUS_OK) {
1401 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1402 result);
1403 pce_dev->chan_ce_in_status = -1;
1404 } else
1405 pce_dev->chan_ce_in_status = 0;
1406
1407 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
1408 if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
1409 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1410 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1411
1412 /* done */
1413 _ablk_cipher_complete(pce_dev);
1414 }
1415};
1416
1417static void _ablk_cipher_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
1418 unsigned int result, struct msm_dmov_errdata *err)
1419{
1420 struct qce_device *pce_dev;
1421
1422 pce_dev = (struct qce_device *) cmd_ptr->user;
1423 if (result != ADM_STATUS_OK) {
1424 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1425 result);
1426 pce_dev->chan_ce_out_status = -1;
1427 } else {
1428 pce_dev->chan_ce_out_status = 0;
1429 };
1430
1431 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
1432 if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
1433 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1434 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1435
1436 /* done */
1437 _ablk_cipher_complete(pce_dev);
1438 }
1439};
1440
1441
1442static void _ablk_cipher_ce_in_call_back_pmem(struct msm_dmov_cmd *cmd_ptr,
1443 unsigned int result, struct msm_dmov_errdata *err)
1444{
1445 struct qce_device *pce_dev;
1446
1447 pce_dev = (struct qce_device *) cmd_ptr->user;
1448 if (result != ADM_STATUS_OK) {
1449 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1450 result);
1451 pce_dev->chan_ce_in_status = -1;
1452 } else
1453 pce_dev->chan_ce_in_status = 0;
1454
1455 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
1456 if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
1457 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1458 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1459
1460 /* done */
1461 _ablk_cipher_use_pmem_complete(pce_dev);
1462 }
1463};
1464
1465static void _ablk_cipher_ce_out_call_back_pmem(struct msm_dmov_cmd *cmd_ptr,
1466 unsigned int result, struct msm_dmov_errdata *err)
1467{
1468 struct qce_device *pce_dev;
1469
1470 pce_dev = (struct qce_device *) cmd_ptr->user;
1471 if (result != ADM_STATUS_OK) {
1472 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1473 result);
1474 pce_dev->chan_ce_out_status = -1;
1475 } else {
1476 pce_dev->chan_ce_out_status = 0;
1477 };
1478
1479 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
1480 if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
1481 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1482 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1483
1484 /* done */
1485 _ablk_cipher_use_pmem_complete(pce_dev);
1486 }
1487};
1488
/*
 * Carve the single 4K DMA-coherent page (coh_vmem/coh_pmem) into the
 * fixed regions the driver needs -- descriptor tables, command lists,
 * digest buffer, pad and scratch areas -- and pre-build the ADM command
 * templates for the ce_in and ce_out channels.  Per-operation code only
 * patches addresses/lengths into these templates.
 *
 * NOTE(review): the regions are laid out sequentially by pointer
 * arithmetic on @vaddr; the order and sizes below must stay in sync
 * with each other and with the page size.  Do not reorder.
 */
static int _setup_cmd_template(struct qce_device *pce_dev)
{
	dmov_sg *pcmd;
	dmov_s *pscmd;
	struct dmov_desc *pdesc;
	unsigned char *vaddr;

	/* Divide up the 4K coherent memory */
	/* 1. ce_in channel 1st command src descriptors, 128 entries */
	vaddr = pce_dev->coh_vmem;
	/* ADM descriptors require 16-byte alignment */
	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
	pce_dev->ce_in_src_desc = (struct dmov_desc *) vaddr;
	/* physical address = base + offset into the coherent page */
	pce_dev->phy_ce_in_src_desc = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);

	/* 2. ce_in channel 1st command dst descriptor, 1 entry */
	pce_dev->ce_in_dst_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_in_dst_desc = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(struct dmov_desc) ;

	/*
	 * 3. ce_in channel command list of one scatter gather command
	 *    and one simple command.
	 */
	pce_dev->cmd_list_ce_in = vaddr;
	pce_dev->phy_cmd_list_ce_in = pce_dev->coh_pmem
			 + (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(dmov_s) + sizeof(dmov_sg);

	/* 4. authentication result. */
	pce_dev->dig_result = vaddr;
	pce_dev->phy_dig_result = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + SHA256_DIGESTSIZE;

	/*
	 * 5. ce_out channel command list of one scatter gather command
	 *    and one simple command.
	 */
	pce_dev->cmd_list_ce_out = vaddr;
	pce_dev->phy_cmd_list_ce_out = pce_dev->coh_pmem
			 + (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(dmov_s) + sizeof(dmov_sg);

	/* 6. ce_out channel command src descriptors, 1 entry */
	pce_dev->ce_out_src_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_out_src_desc = pce_dev->coh_pmem
			 + (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(struct dmov_desc) ;

	/* 7. ce_out channel command dst descriptors, 128 entries. */
	pce_dev->ce_out_dst_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_out_dst_desc = pce_dev->coh_pmem
			 + (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);

	/* 8. pad area. */
	pce_dev->ce_pad = vaddr;
	pce_dev->phy_ce_pad = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + ADM_CE_BLOCK_SIZE;

	/* 9. ce_in channel command pointer list. */
	pce_dev->cmd_pointer_list_ce_in = (unsigned int *) vaddr;
	pce_dev->phy_cmd_pointer_list_ce_in = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(unsigned char *);
	vaddr = (unsigned char *) ALIGN(((unsigned int) vaddr), 8);

	/* 10. ce_ou channel command pointer list. */
	pce_dev->cmd_pointer_list_ce_out = (unsigned int *) vaddr;
	pce_dev->phy_cmd_pointer_list_ce_out = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(unsigned char *);

	/* 11. throw away area to store by-pass data from ce_out. */
	pce_dev->ce_out_ignore = (unsigned char *) vaddr;
	pce_dev->phy_ce_out_ignore  = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	pce_dev->ce_out_ignore_size = PAGE_SIZE - (vaddr -
			pce_dev->coh_vmem);  /* at least 1.5 K of space */
	/*
	 * The first command of command list ce_in is for the input of
	 * concurrent operation of encrypt/decrypt or for the input
	 * of authentication.
	 */
	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in;
	/* swap byte and half word , dst crci ,  scatter gather */
	pcmd->cmd = CMD_DST_SWAP_BYTES | CMD_DST_SWAP_SHORTS |
			CMD_DST_CRCI(pce_dev->crci_in) | CMD_MODE_SG;
	pdesc = pce_dev->ce_in_src_desc;
	pdesc->addr = 0;	/* to be filled in each operation */
	pdesc->len = 0;		/* to be filled in each operation */
	pcmd->src_dscr = (unsigned) pce_dev->phy_ce_in_src_desc;
	pdesc = pce_dev->ce_in_dst_desc;
	/* destination is the CE data-in shadow register window */
	pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase);
	pdesc->len = 0 | ADM_DESC_LAST;	/* to be filled in each operation */
	pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_in_dst_desc;
	pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) |
						DST_INDEX_SG_CMD(0);
	pcmd++;
	/*
	 * The second command is for the digested data of
	 * hashing operation only. For others, this command is not used.
	 */
	pscmd = (dmov_s *) pcmd;
	/* last command, swap byte, half word, src crci, single */
	pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
			CMD_SRC_CRCI(pce_dev->crci_hash) | CMD_MODE_SINGLE;
	pscmd->src = (unsigned) (CRYPTO_AUTH_IV0_REG + pce_dev->phy_iobase);
	pscmd->len = SHA256_DIGESTSIZE;	/* to be filled. */
	pscmd->dst = (unsigned) pce_dev->phy_dig_result;
	/* setup command pointer list */
	*(pce_dev->cmd_pointer_list_ce_in) = (CMD_PTR_LP | DMOV_CMD_LIST |
			DMOV_CMD_ADDR((unsigned int)
					pce_dev->phy_cmd_list_ce_in));
	pce_dev->chan_ce_in_cmd->user = (void *) pce_dev;
	pce_dev->chan_ce_in_cmd->exec_func = NULL;
	pce_dev->chan_ce_in_cmd->cmdptr = DMOV_CMD_ADDR(
			(unsigned int) pce_dev->phy_cmd_pointer_list_ce_in);
	pce_dev->chan_ce_in_cmd->crci_mask = msm_dmov_build_crci_mask(2,
			pce_dev->crci_in, pce_dev->crci_hash);
	/*
	 * The first command in the command list ce_out.
	 * It is for encry/decryp output.
	 * If hashing only, ce_out is not used.
	 */
	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out;
	/* swap byte, half word, source crci, scatter gather */
	pcmd->cmd =   CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
			CMD_SRC_CRCI(pce_dev->crci_out) | CMD_MODE_SG;
	pdesc = pce_dev->ce_out_src_desc;
	/* source is the CE data-out shadow register window */
	pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase);
	pdesc->len = 0;		/* to be filled in each opeation */
	pcmd->src_dscr = (unsigned) pce_dev->phy_ce_out_src_desc;
	pdesc = pce_dev->ce_out_dst_desc;
	pdesc->addr = 0;	/* to be filled in each opeation */
	pdesc->len = 0;		/* to be filled in each opeation */
	pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_out_dst_desc;
	pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) |
						DST_INDEX_SG_CMD(0);
	pcmd++;
	/*
	 * The second command is for digested data of esp operation.
	 * For ciphering, this command is not used.
	 */
	pscmd = (dmov_s *) pcmd;
	/* last command, swap byte, half word, src crci, single */
	pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
			CMD_SRC_CRCI(pce_dev->crci_hash) | CMD_MODE_SINGLE;
	pscmd->src = (CRYPTO_AUTH_IV0_REG + pce_dev->phy_iobase);
	pscmd->len = SHA1_DIGESTSIZE;	/* we only support hmac(sha1) */
	pscmd->dst = (unsigned) pce_dev->phy_dig_result;
	/* setup command pointer list */
	*(pce_dev->cmd_pointer_list_ce_out) = (CMD_PTR_LP | DMOV_CMD_LIST |
			DMOV_CMD_ADDR((unsigned int)pce_dev->
						phy_cmd_list_ce_out));

	pce_dev->chan_ce_out_cmd->user = pce_dev;
	pce_dev->chan_ce_out_cmd->exec_func = NULL;
	pce_dev->chan_ce_out_cmd->cmdptr = DMOV_CMD_ADDR(
			(unsigned int) pce_dev->phy_cmd_pointer_list_ce_out);
	pce_dev->chan_ce_out_cmd->crci_mask = msm_dmov_build_crci_mask(2,
			pce_dev->crci_out, pce_dev->crci_hash);


	return 0;
};
1659
1660static int _qce_start_dma(struct qce_device *pce_dev, bool ce_in, bool ce_out)
1661{
1662
1663 if (ce_in)
1664 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IN_PROG;
1665 else
1666 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
1667
1668 if (ce_out)
1669 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IN_PROG;
1670 else
1671 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
1672
1673 if (ce_in)
1674 msm_dmov_enqueue_cmd(pce_dev->chan_ce_in,
1675 pce_dev->chan_ce_in_cmd);
1676 if (ce_out)
1677 msm_dmov_enqueue_cmd(pce_dev->chan_ce_out,
1678 pce_dev->chan_ce_out_cmd);
1679
1680 return 0;
1681};
1682
1683static void _f9_complete(struct qce_device *pce_dev)
1684{
1685 uint32_t mac_i;
1686 uint32_t status;
1687
1688 dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
1689 pce_dev->ota_size, DMA_TO_DEVICE);
1690
1691 /* check ce error status */
1692 status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
1693 if (status & (1 << CRYPTO_SW_ERR)) {
1694 pce_dev->err++;
1695 dev_err(pce_dev->pdev,
1696 "Qualcomm Crypto Error at 0x%x, status%x\n",
1697 pce_dev->phy_iobase, status);
1698 _init_ce_engine(pce_dev);
1699 pce_dev->qce_cb(pce_dev->areq, NULL, NULL, -ENXIO);
1700 return;
1701 };
1702
1703 mac_i = readl_relaxed(pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
1704 pce_dev->qce_cb(pce_dev->areq, (void *) mac_i, NULL,
1705 pce_dev->chan_ce_in_status);
1706};
1707
1708static void _f8_complete(struct qce_device *pce_dev)
1709{
1710 uint32_t status;
1711
1712 if (pce_dev->phy_ota_dst != 0)
1713 dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst,
1714 pce_dev->ota_size, DMA_FROM_DEVICE);
1715 if (pce_dev->phy_ota_src != 0)
1716 dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
1717 pce_dev->ota_size, (pce_dev->phy_ota_dst) ?
1718 DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
1719
1720 /* check ce error status */
1721 status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
1722 if (status & (1 << CRYPTO_SW_ERR)) {
1723 pce_dev->err++;
1724 dev_err(pce_dev->pdev,
1725 "Qualcomm Crypto Error at 0x%x, status%x\n",
1726 pce_dev->phy_iobase, status);
1727 _init_ce_engine(pce_dev);
1728 pce_dev->qce_cb(pce_dev->areq, NULL, NULL, -ENXIO);
1729 return;
1730 };
1731
1732 pce_dev->qce_cb(pce_dev->areq, NULL, NULL,
1733 pce_dev->chan_ce_in_status |
1734 pce_dev->chan_ce_out_status);
1735};
1736
1737
1738static void _f9_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
1739 unsigned int result, struct msm_dmov_errdata *err)
1740{
1741 struct qce_device *pce_dev;
1742
1743 pce_dev = (struct qce_device *) cmd_ptr->user;
1744 if (result != ADM_STATUS_OK) {
1745 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1746 result);
1747 pce_dev->chan_ce_in_status = -1;
1748 } else
1749 pce_dev->chan_ce_in_status = 0;
1750 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1751 _f9_complete(pce_dev);
1752};
1753
1754static void _f8_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
1755 unsigned int result, struct msm_dmov_errdata *err)
1756{
1757 struct qce_device *pce_dev;
1758
1759 pce_dev = (struct qce_device *) cmd_ptr->user;
1760 if (result != ADM_STATUS_OK) {
1761 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1762 result);
1763 pce_dev->chan_ce_in_status = -1;
1764 } else
1765 pce_dev->chan_ce_in_status = 0;
1766
1767 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
1768 if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
1769 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1770 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1771
1772 /* done */
1773 _f8_complete(pce_dev);
1774 }
1775};
1776
1777static void _f8_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
1778 unsigned int result, struct msm_dmov_errdata *err)
1779{
1780 struct qce_device *pce_dev;
1781
1782 pce_dev = (struct qce_device *) cmd_ptr->user;
1783 if (result != ADM_STATUS_OK) {
1784 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1785 result);
1786 pce_dev->chan_ce_out_status = -1;
1787 } else {
1788 pce_dev->chan_ce_out_status = 0;
1789 };
1790
1791 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
1792 if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
1793 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1794 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1795
1796 /* done */
1797 _f8_complete(pce_dev);
1798 }
1799};
1800
/*
 * Program the crypto engine registers for an OTA f9 (integrity, UIA1/
 * UIA2) operation and issue GO.  The register write sequence ends with
 * a full memory barrier so the engine is armed before the caller
 * starts the DMA transfer.  Returns 0.
 */
static int _ce_f9_setup(struct qce_device *pce_dev, struct qce_f9_req * req)
{
	uint32_t cfg;
	uint32_t ikey[OTA_KEY_SIZE/sizeof(uint32_t)];

	/* integrity key goes into the AUTH IV registers */
	_byte_stream_to_net_words(ikey, &req->ikey[0], OTA_KEY_SIZE);
	writel_relaxed(ikey[0], pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
	writel_relaxed(ikey[1], pce_dev->iobase + CRYPTO_AUTH_IV1_REG);
	writel_relaxed(ikey[2], pce_dev->iobase + CRYPTO_AUTH_IV2_REG);
	writel_relaxed(ikey[3], pce_dev->iobase + CRYPTO_AUTH_IV3_REG);
	writel_relaxed(req->last_bits, pce_dev->iobase + CRYPTO_AUTH_IV4_REG);

	writel_relaxed(req->fresh, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG);
	writel_relaxed(req->count_i, pce_dev->iobase +
						CRYPTO_AUTH_BYTECNT1_REG);

	/* write auth_seg_cfg */
	writel_relaxed((uint32_t)req->msize << CRYPTO_AUTH_SEG_SIZE,
			pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);

	/* write seg_cfg */
	cfg = (CRYPTO_AUTH_ALG_F9 << CRYPTO_AUTH_ALG) | (1 << CRYPTO_FIRST) |
			(1 << CRYPTO_LAST);

	/* Kasumi -> UIA1, otherwise Snow3G -> UIA2 */
	if (req->algorithm == QCE_OTA_ALGO_KASUMI)
		cfg |= (CRYPTO_AUTH_SIZE_UIA1 << CRYPTO_AUTH_SIZE);
	else
		cfg |= (CRYPTO_AUTH_SIZE_UIA2 << CRYPTO_AUTH_SIZE) ;

	if (req->direction == QCE_OTA_DIR_DOWNLINK)
		cfg |= 1 << CRYPTO_F9_DIRECTION;

	writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);

	/* write seg_size */
	writel_relaxed(req->msize, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	/* issue go to crypto */
	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);

	/*
	 * barrier to ensure previous instructions
	 * (including GO) to CE finish before issue DMA transfer
	 * request.
	 */
	mb();
	return 0;
};
1849
/*
 * Program the crypto engine registers for an OTA f8 (ciphering, UEA1/
 * UEA2) operation and issue GO.  @key_stream_mode generates raw
 * keystream (data length must be 16-byte aligned and single-packet);
 * @npkts/@cipher_offset/@cipher_size describe the packet layout.
 * Ends with a full memory barrier so the engine is armed before the
 * caller starts the DMA transfer.
 * Returns 0, or -EINVAL for an invalid mode/bearer combination.
 */
static int _ce_f8_setup(struct qce_device *pce_dev, struct qce_f8_req *req,
		bool key_stream_mode, uint16_t npkts, uint16_t cipher_offset,
		uint16_t cipher_size)
{
	uint32_t cfg;
	uint32_t ckey[OTA_KEY_SIZE/sizeof(uint32_t)];

	/* keystream mode requires 16-byte-aligned length and one packet */
	if ((key_stream_mode && (req->data_len & 0xf || npkts > 1)) ||
				(req->bearer >= QCE_OTA_MAX_BEARER))
		return -EINVAL;

	/* write seg_cfg */
	cfg = (CRYPTO_ENCR_ALG_F8 << CRYPTO_ENCR_ALG) | (1 << CRYPTO_FIRST) |
			(1 << CRYPTO_LAST);
	/* Kasumi -> UEA1, otherwise Snow3G -> UEA2 */
	if (req->algorithm == QCE_OTA_ALGO_KASUMI)
		cfg |= (CRYPTO_ENCR_KEY_SZ_UEA1 << CRYPTO_ENCR_KEY_SZ);
	else
		cfg |= (CRYPTO_ENCR_KEY_SZ_UEA2 << CRYPTO_ENCR_KEY_SZ) ;
	if (key_stream_mode)
		cfg |= 1 << CRYPTO_F8_KEYSTREAM_ENABLE;
	if (req->direction == QCE_OTA_DIR_DOWNLINK)
		cfg |= 1 << CRYPTO_F8_DIRECTION;
	writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);

	/* write seg_size  */
	writel_relaxed(req->data_len, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	/* write 0 to auth_size, auth_offset */
	writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);

	/* write encr_seg_cfg seg_size, seg_offset */
	writel_relaxed((((uint32_t) cipher_size) << CRYPTO_ENCR_SEG_SIZE) |
			(cipher_offset & 0xffff),
			pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);

	/* write keys: the cipher key is loaded via the DES key registers */
	_byte_stream_to_net_words(ckey, &req->ckey[0], OTA_KEY_SIZE);
	writel_relaxed(ckey[0], pce_dev->iobase + CRYPTO_DES_KEY0_REG);
	writel_relaxed(ckey[1], pce_dev->iobase + CRYPTO_DES_KEY1_REG);
	writel_relaxed(ckey[2], pce_dev->iobase + CRYPTO_DES_KEY2_REG);
	writel_relaxed(ckey[3], pce_dev->iobase + CRYPTO_DES_KEY3_REG);

	/* write cntr0_iv0 for countC */
	writel_relaxed(req->count_c, pce_dev->iobase + CRYPTO_CNTR0_IV0_REG);

	/* write cntr1_iv1 for nPkts, and bearer */
	if (npkts == 1)
		npkts = 0;
	writel_relaxed(req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
			npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT,
			pce_dev->iobase + CRYPTO_CNTR1_IV1_REG);

	/* issue go to crypto */
	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);

	/*
	 * barrier to ensure previous instructions
	 * (including GO) to CE finish before issue DMA transfer
	 * request.
	 */
	mb();
	return 0;
};
1913
/*
 * qce_aead_req() - submit an AEAD (cipher + auth) request to the engine.
 * @handle:  qce device handle returned by qce_open()
 * @q_req:   request descriptor; q_req->areq is the crypto-API
 *           aead_request, q_req->qce_cb is invoked on completion.
 *
 * Maps the associated data, cipher IV, source and destination buffers
 * for DMA, chains them into the ce_in/ce_out descriptor lists (with
 * pad bytes to reach a 16-byte CE block boundary), programs the
 * engine, and kicks both DMA channels.  On failure everything mapped
 * so far is unmapped.  Returns 0 on successful submission (completion
 * is reported asynchronously via q_req->qce_cb) or a negative errno.
 */
int qce_aead_req(void *handle, struct qce_req *q_req)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	struct aead_request *areq = (struct aead_request *) q_req->areq;
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	uint32_t ivsize = crypto_aead_ivsize(aead);
	uint32_t totallen;
	uint32_t pad_len;
	uint32_t authsize = crypto_aead_authsize(aead);
	int rc = 0;

	q_req->ivsize = ivsize;
	/* on decrypt, the trailing auth tag is not part of the ciphertext */
	if (q_req->dir == QCE_ENCRYPT)
		q_req->cryptlen = areq->cryptlen;
	else
		q_req->cryptlen = areq->cryptlen - authsize;

	/* pad the total transfer up to the CE 16-byte block size */
	totallen = q_req->cryptlen + ivsize + areq->assoclen;
	pad_len = ALIGN(totallen, ADM_CE_BLOCK_SIZE) - totallen;

	_chain_buffer_in_init(pce_dev);
	_chain_buffer_out_init(pce_dev);

	pce_dev->assoc_nents = 0;
	pce_dev->phy_iv_in = 0;
	pce_dev->src_nents = 0;
	pce_dev->dst_nents = 0;

	/* associated data (input only) */
	pce_dev->assoc_nents = count_sg(areq->assoc, areq->assoclen);
	dma_map_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
					 DMA_TO_DEVICE);
	if (_chain_sg_buffer_in(pce_dev, areq->assoc, areq->assoclen) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* cipher iv for input */
	pce_dev->phy_iv_in = dma_map_single(pce_dev->pdev, q_req->iv,
			ivsize, DMA_TO_DEVICE);
	if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_iv_in, ivsize) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* for output, ignore associated data and cipher iv */
	if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_out_ignore,
				ivsize + areq->assoclen) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* cipher input */
	pce_dev->src_nents = count_sg(areq->src, q_req->cryptlen);
	dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
							DMA_TO_DEVICE);
	if (_chain_sg_buffer_in(pce_dev, areq->src, q_req->cryptlen) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* cipher output; in-place requests reuse the src mapping */
	if (areq->src != areq->dst) {
		pce_dev->dst_nents = count_sg(areq->dst, q_req->cryptlen);
		dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
				DMA_FROM_DEVICE);
	};
	if (_chain_sg_buffer_out(pce_dev, areq->dst, q_req->cryptlen) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* pad data on both channels so lengths stay block-aligned */
	if (pad_len) {
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
						pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
						pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* finalize the ce_in and ce_out channels command lists */
	_ce_in_final(pce_dev, 1, ALIGN(totallen, ADM_CE_BLOCK_SIZE));
	/* ce_out uses 2 commands: data out plus the digest read */
	_ce_out_final(pce_dev, 2, ALIGN(totallen, ADM_CE_BLOCK_SIZE));

	/* set up crypto device */
	rc = _ce_setup(pce_dev, q_req, totallen, ivsize + areq->assoclen);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = q_req->areq;
	pce_dev->qce_cb = q_req->qce_cb;

	pce_dev->chan_ce_in_cmd->complete_func = _aead_ce_in_call_back;
	pce_dev->chan_ce_out_cmd->complete_func = _aead_ce_out_call_back;

	rc = _qce_start_dma(pce_dev, true, true);
	if (rc == 0)
		return 0;
bad:
	/* undo only the mappings that were actually made */
	if (pce_dev->assoc_nents) {
		dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
				DMA_TO_DEVICE);
	}
	if (pce_dev->phy_iv_in) {
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
				ivsize, DMA_TO_DEVICE);
	}
	if (pce_dev->src_nents) {
		dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
				(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
								DMA_TO_DEVICE);
	}
	if (pce_dev->dst_nents) {
		dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
				DMA_FROM_DEVICE);
	}
	return rc;
}
EXPORT_SYMBOL(qce_aead_req);
2040
/*
 * qce_ablk_cipher_req() - submit an ablkcipher request to the engine.
 * @handle:  qce device handle returned by qce_open()
 * @c_req:   request descriptor; c_req->areq is the crypto-API
 *           ablkcipher_request, c_req->qce_cb is invoked on completion.
 *           c_req->use_pmem == 1 selects pmem buffer mapping and the
 *           pmem completion callbacks.
 *
 * Maps source/destination for DMA (regular SG or pmem), chains them
 * into the ce_in/ce_out descriptor lists with pad bytes up to the CE
 * 16-byte block size, programs the engine, and kicks both DMA
 * channels.  Returns 0 on successful submission (completion is
 * asynchronous) or a negative errno; on failure non-pmem mappings are
 * undone.
 */
int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
{
	int rc = 0;
	struct qce_device *pce_dev = (struct qce_device *) handle;
	struct ablkcipher_request *areq = (struct ablkcipher_request *)
						c_req->areq;

	/* pad the transfer up to the CE 16-byte block size */
	uint32_t pad_len = ALIGN(areq->nbytes, ADM_CE_BLOCK_SIZE)
						- areq->nbytes;

	_chain_buffer_in_init(pce_dev);
	_chain_buffer_out_init(pce_dev);

	pce_dev->src_nents = 0;
	pce_dev->dst_nents = 0;
	/* cipher input */
	pce_dev->src_nents = count_sg(areq->src, areq->nbytes);

	if (c_req->use_pmem != 1)
		dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
								DMA_TO_DEVICE);
	else
		dma_map_pmem_sg(&c_req->pmem->src[0], pce_dev->src_nents,
								areq->src);

	if (_chain_sg_buffer_in(pce_dev, areq->src, areq->nbytes) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* cipher output; in-place requests reuse the src mapping */
	if (areq->src != areq->dst) {
		pce_dev->dst_nents = count_sg(areq->dst, areq->nbytes);
		if (c_req->use_pmem != 1)
			dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
							DMA_FROM_DEVICE);
		else
			dma_map_pmem_sg(&c_req->pmem->dst[0],
					pce_dev->dst_nents, areq->dst);
	};
	if (_chain_sg_buffer_out(pce_dev, areq->dst, areq->nbytes) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* pad data on both channels so lengths stay block-aligned */
	if (pad_len) {
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
						pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
						pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* finalize the ce_in and ce_out channels command lists */
	_ce_in_final(pce_dev, 1, areq->nbytes + pad_len);
	_ce_out_final(pce_dev, 1, areq->nbytes + pad_len);

#ifdef QCE_DEBUG
	_ce_in_dump(pce_dev);
	_ce_out_dump(pce_dev);
#endif
	/* set up crypto device */
	rc = _ce_setup(pce_dev, c_req, areq->nbytes, 0);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = areq;
	pce_dev->qce_cb = c_req->qce_cb;
	if (c_req->use_pmem == 1) {
		pce_dev->chan_ce_in_cmd->complete_func =
					_ablk_cipher_ce_in_call_back_pmem;
		pce_dev->chan_ce_out_cmd->complete_func =
					_ablk_cipher_ce_out_call_back_pmem;
	} else {
		pce_dev->chan_ce_in_cmd->complete_func =
					_ablk_cipher_ce_in_call_back;
		pce_dev->chan_ce_out_cmd->complete_func =
					_ablk_cipher_ce_out_call_back;
	}
	rc = _qce_start_dma(pce_dev, true, true);

	if (rc == 0)
		return 0;
bad:
	/* pmem mappings are not unmapped here; only regular SG mappings */
	if (c_req->use_pmem != 1) {
		if (pce_dev->dst_nents) {
			dma_unmap_sg(pce_dev->pdev, areq->dst,
			pce_dev->dst_nents, DMA_FROM_DEVICE);
		}
		if (pce_dev->src_nents) {
			dma_unmap_sg(pce_dev->pdev, areq->src,
					pce_dev->src_nents,
					(areq->src == areq->dst) ?
						DMA_BIDIRECTIONAL :
						DMA_TO_DEVICE);
		}
	}
	return rc;
}
EXPORT_SYMBOL(qce_ablk_cipher_req);
2149
/*
 * qce_process_sha_req() - issue a SHA digest request to the crypto engine.
 * @handle:	qce device handle returned by qce_open()
 * @sreq:	hash request; sreq->areq carries the struct ahash_request
 *		handed back through the completion callback
 *
 * Maps sreq->src for DMA, chains it onto the CE input command list,
 * pads the transfer to a multiple of ADM_CE_BLOCK_SIZE, programs the
 * hash engine and starts only the input DMA channel (the out-channel
 * argument to _qce_start_dma() is false).  Completion is signalled
 * asynchronously through sreq->qce_cb.
 *
 * Return: 0 on successful submission, negative errno on failure; on
 * failure the source mapping is released.
 */
int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	int rc;
	/* pad the transfer up to a whole number of 16-byte ADM blocks */
	uint32_t pad_len = ALIGN(sreq->size, ADM_CE_BLOCK_SIZE) - sreq->size;
	struct ahash_request *areq = (struct ahash_request *)sreq->areq;

	_chain_buffer_in_init(pce_dev);
	pce_dev->src_nents = count_sg(sreq->src, sreq->size);
	/* NOTE(review): dma_map_sg() return value is not checked -- confirm */
	dma_map_sg(pce_dev->pdev, sreq->src, pce_dev->src_nents,
							DMA_TO_DEVICE);

	if (_chain_sg_buffer_in(pce_dev, sreq->src, sreq->size) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	if (pad_len) {
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
						pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}
	/* 2: value used for hash transfers (cipher paths pass 1) */
	_ce_in_final(pce_dev, 2, sreq->size + pad_len);

#ifdef QCE_DEBUG
	_ce_in_dump(pce_dev);
#endif

	rc = _sha_ce_setup(pce_dev, sreq);

	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = areq;
	pce_dev->qce_cb = sreq->qce_cb;
	pce_dev->chan_ce_in_cmd->complete_func = _sha_ce_in_call_back;

	rc = _qce_start_dma(pce_dev, true, false);

	if (rc == 0)
		return 0;
bad:
	if (pce_dev->src_nents) {
		dma_unmap_sg(pce_dev->pdev, sreq->src,
				pce_dev->src_nents, DMA_TO_DEVICE);
	}

	return rc;
}
EXPORT_SYMBOL(qce_process_sha_req);
2202
2203/*
2204 * crypto engine open function.
2205 */
2206void *qce_open(struct platform_device *pdev, int *rc)
2207{
2208 struct qce_device *pce_dev;
2209 struct resource *resource;
2210 struct clk *ce_clk;
2211
2212 pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
2213 if (!pce_dev) {
2214 *rc = -ENOMEM;
2215 dev_err(&pdev->dev, "Can not allocate memory\n");
2216 return NULL;
2217 }
2218 pce_dev->pdev = &pdev->dev;
2219 ce_clk = clk_get(pce_dev->pdev, "ce_clk");
2220 if (IS_ERR(ce_clk)) {
Mona Hossaina8657d82011-07-11 16:30:08 -07002221 kfree(pce_dev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002222 *rc = PTR_ERR(ce_clk);
2223 return NULL;
2224 }
2225 pce_dev->ce_clk = ce_clk;
Mona Hossaina8657d82011-07-11 16:30:08 -07002226 *rc = clk_enable(pce_dev->ce_clk);
2227 if (*rc) {
2228 kfree(pce_dev);
2229 return NULL;
2230 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002231
2232 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2233 if (!resource) {
2234 *rc = -ENXIO;
2235 dev_err(pce_dev->pdev, "Missing MEM resource\n");
2236 goto err;
2237 };
2238 pce_dev->phy_iobase = resource->start;
2239 pce_dev->iobase = ioremap_nocache(resource->start,
2240 resource->end - resource->start + 1);
2241 if (!pce_dev->iobase) {
2242 *rc = -ENOMEM;
2243 dev_err(pce_dev->pdev, "Can not map io memory\n");
2244 goto err;
2245 }
2246
2247 pce_dev->chan_ce_in_cmd = kzalloc(sizeof(struct msm_dmov_cmd),
2248 GFP_KERNEL);
2249 pce_dev->chan_ce_out_cmd = kzalloc(sizeof(struct msm_dmov_cmd),
2250 GFP_KERNEL);
2251 if (pce_dev->chan_ce_in_cmd == NULL ||
2252 pce_dev->chan_ce_out_cmd == NULL) {
2253 dev_err(pce_dev->pdev, "Can not allocate memory\n");
2254 *rc = -ENOMEM;
2255 goto err;
2256 }
2257
2258 resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
2259 "crypto_channels");
2260 if (!resource) {
2261 *rc = -ENXIO;
2262 dev_err(pce_dev->pdev, "Missing DMA channel resource\n");
2263 goto err;
2264 };
2265 pce_dev->chan_ce_in = resource->start;
2266 pce_dev->chan_ce_out = resource->end;
2267 resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
2268 "crypto_crci_in");
2269 if (!resource) {
2270 *rc = -ENXIO;
2271 dev_err(pce_dev->pdev, "Missing DMA crci in resource\n");
2272 goto err;
2273 };
2274 pce_dev->crci_in = resource->start;
2275 resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
2276 "crypto_crci_out");
2277 if (!resource) {
2278 *rc = -ENXIO;
2279 dev_err(pce_dev->pdev, "Missing DMA crci out resource\n");
2280 goto err;
2281 };
2282 pce_dev->crci_out = resource->start;
2283 resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
2284 "crypto_crci_hash");
2285 if (!resource) {
2286 *rc = -ENXIO;
2287 dev_err(pce_dev->pdev, "Missing DMA crci hash resource\n");
2288 goto err;
2289 };
2290 pce_dev->crci_hash = resource->start;
2291 pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
2292 PAGE_SIZE, &pce_dev->coh_pmem, GFP_KERNEL);
2293
2294 if (pce_dev->coh_vmem == NULL) {
2295 *rc = -ENOMEM;
2296 dev_err(pce_dev->pdev, "Can not allocate coherent memory.\n");
2297 goto err;
2298 }
2299 _setup_cmd_template(pce_dev);
2300
2301 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
2302 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
2303
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002304 if (_init_ce_engine(pce_dev)) {
2305 *rc = -ENXIO;
2306 clk_disable(pce_dev->ce_clk);
2307 goto err;
2308 }
2309 *rc = 0;
2310 clk_disable(pce_dev->ce_clk);
2311
2312 pce_dev->err = 0;
2313
2314 return pce_dev;
2315err:
2316 if (pce_dev)
2317 qce_close(pce_dev);
2318 return NULL;
2319}
2320EXPORT_SYMBOL(qce_open);
2321
2322/*
2323 * crypto engine close function.
2324 */
2325int qce_close(void *handle)
2326{
2327 struct qce_device *pce_dev = (struct qce_device *) handle;
2328
2329 if (handle == NULL)
2330 return -ENODEV;
2331 if (pce_dev->iobase)
2332 iounmap(pce_dev->iobase);
2333
2334 if (pce_dev->coh_vmem)
2335 dma_free_coherent(pce_dev->pdev, PAGE_SIZE, pce_dev->coh_vmem,
2336 pce_dev->coh_pmem);
2337 kfree(pce_dev->chan_ce_in_cmd);
2338 kfree(pce_dev->chan_ce_out_cmd);
2339
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002340 clk_put(pce_dev->ce_clk);
Mona Hossain451cf982011-07-13 11:48:14 -07002341 kfree(handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002342 return 0;
2343}
2344EXPORT_SYMBOL(qce_close);
2345
2346int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
2347{
2348 struct qce_device *pce_dev = (struct qce_device *) handle;
2349
2350 if (ce_support == NULL)
2351 return -EINVAL;
2352
2353 if (pce_dev->hmac == 1)
2354 ce_support->sha1_hmac_20 = true;
2355 else
2356 ce_support->sha1_hmac_20 = false;
2357 ce_support->sha1_hmac = false;
2358 ce_support->sha256_hmac = false;
2359 ce_support->sha_hmac = false;
2360 ce_support->cmac = false;
2361 ce_support->aes_key_192 = true;
2362 ce_support->aes_xts = false;
2363 ce_support->aes_ccm = false;
2364 ce_support->ota = pce_dev->ota;
2365 return 0;
2366}
2367EXPORT_SYMBOL(qce_hw_support);
2368
/*
 * qce_f8_req() - issue an OTA f8 (ciphering) request to the crypto engine.
 * @handle:	qce device handle returned by qce_open()
 * @req:	f8 request descriptor (see linux/qcota.h)
 * @cookie:	opaque caller context handed back through @qce_cb
 * @qce_cb:	completion callback
 *
 * When req->data_in is NULL the engine runs in key-stream mode: nothing
 * is streamed in and only the output DMA channel is started.  Otherwise
 * the input buffer is mapped for DMA (bidirectionally when ciphering in
 * place, i.e. data_in == data_out).  Both directions are padded to a
 * multiple of ADM_CE_BLOCK_SIZE.
 *
 * Return: 0 on successful submission, negative errno on failure; on
 * failure any single-buffer mappings created here are released.
 */
int qce_f8_req(void *handle, struct qce_f8_req *req,
			void *cookie, qce_comp_func_ptr_t qce_cb)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	bool key_stream_mode;
	dma_addr_t dst;
	int rc;
	/* pad the transfer up to a whole number of 16-byte ADM blocks */
	uint32_t pad_len = ALIGN(req->data_len, ADM_CE_BLOCK_SIZE) -
						req->data_len;

	_chain_buffer_in_init(pce_dev);
	_chain_buffer_out_init(pce_dev);

	/* a NULL input buffer selects key-stream generation */
	key_stream_mode = (req->data_in == NULL);

	/* F8 cipher input */
	if (key_stream_mode)
		pce_dev->phy_ota_src = 0;
	else {
		pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev,
					req->data_in, req->data_len,
					(req->data_in == req->data_out) ?
					DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src,
				req->data_len) < 0) {
			pce_dev->phy_ota_dst = 0;
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* F8 cipher output; in-place requests reuse the input mapping */
	if (req->data_in != req->data_out) {
		dst = dma_map_single(pce_dev->pdev, req->data_out,
				req->data_len, DMA_FROM_DEVICE);
		pce_dev->phy_ota_dst = dst;
	} else {
		dst = pce_dev->phy_ota_src;
		pce_dev->phy_ota_dst = 0;
	}
	if (_chain_pm_buffer_out(pce_dev, dst, req->data_len) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	pce_dev->ota_size = req->data_len;

	/* pad data */
	if (pad_len) {
		/* no input padding needed when nothing is streamed in */
		if (!key_stream_mode && _chain_pm_buffer_in(pce_dev,
				pce_dev->phy_ce_pad, pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
				pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* finalize the ce_in and ce_out channels command lists */
	if (!key_stream_mode)
		_ce_in_final(pce_dev, 1, req->data_len + pad_len);
	_ce_out_final(pce_dev, 1, req->data_len + pad_len);

	/* set up crypto device */
	rc = _ce_f8_setup(pce_dev, req, key_stream_mode, 1, 0, req->data_len);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = cookie;
	pce_dev->qce_cb = qce_cb;

	if (!key_stream_mode)
		pce_dev->chan_ce_in_cmd->complete_func = _f8_ce_in_call_back;

	pce_dev->chan_ce_out_cmd->complete_func = _f8_ce_out_call_back;

	/* input channel runs only when there is data to stream in */
	rc = _qce_start_dma(pce_dev, !(key_stream_mode), true);
	if (rc == 0)
		return 0;
bad:
	if (pce_dev->phy_ota_dst != 0)
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst,
				req->data_len, DMA_FROM_DEVICE);
	if (pce_dev->phy_ota_src != 0)
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
				req->data_len,
				(req->data_in == req->data_out) ?
				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	return rc;
}
EXPORT_SYMBOL(qce_f8_req);
2464
/*
 * qce_f8_multi_pkt_req() - issue a multi-packet OTA f8 request.
 * @handle:	qce device handle returned by qce_open()
 * @mreq:	multi-packet request: num_pkt back-to-back packets of
 *		qce_f8_req.data_len bytes each, ciphered from
 *		cipher_start for cipher_size bytes per packet
 * @cookie:	opaque caller context handed back through @qce_cb
 * @qce_cb:	completion callback
 *
 * Maps the whole num_pkt * data_len region in one dma_map_single()
 * call (bidirectionally when in place), pads the total to a multiple
 * of ADM_CE_BLOCK_SIZE, and runs both DMA channels.
 *
 * Return: 0 on successful submission, negative errno on failure; on
 * failure the mappings created here are released.
 */
int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq,
			void *cookie, qce_comp_func_ptr_t qce_cb)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	uint16_t num_pkt = mreq->num_pkt;
	uint16_t cipher_start = mreq->cipher_start;
	uint16_t cipher_size = mreq->cipher_size;
	struct qce_f8_req *req = &mreq->qce_f8_req;
	uint32_t total;
	uint32_t pad_len;
	dma_addr_t dst = 0;
	int rc = 0;

	/* total bytes moved; packets are laid out contiguously */
	total = num_pkt * req->data_len;
	pad_len = ALIGN(total, ADM_CE_BLOCK_SIZE) - total;

	_chain_buffer_in_init(pce_dev);
	_chain_buffer_out_init(pce_dev);

	/* F8 cipher input */
	pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev,
				req->data_in, total,
				(req->data_in == req->data_out) ?
				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src,
				total) < 0) {
		pce_dev->phy_ota_dst = 0;
		rc = -ENOMEM;
		goto bad;
	}
	/* F8 cipher output; in-place requests reuse the input mapping */
	if (req->data_in != req->data_out) {
		dst = dma_map_single(pce_dev->pdev, req->data_out, total,
						DMA_FROM_DEVICE);
		pce_dev->phy_ota_dst = dst;
	} else {
		dst = pce_dev->phy_ota_src;
		pce_dev->phy_ota_dst = 0;
	}
	if (_chain_pm_buffer_out(pce_dev, dst, total) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	pce_dev->ota_size = total;

	/* pad data */
	if (pad_len) {
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
					pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
					pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* finalize the ce_in and ce_out channels command lists */
	_ce_in_final(pce_dev, 1, total + pad_len);
	_ce_out_final(pce_dev, 1, total + pad_len);


	/* set up crypto device */
	rc = _ce_f8_setup(pce_dev, req, false, num_pkt, cipher_start,
			cipher_size);
	if (rc)
		goto bad ;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = cookie;
	pce_dev->qce_cb = qce_cb;

	pce_dev->chan_ce_in_cmd->complete_func = _f8_ce_in_call_back;
	pce_dev->chan_ce_out_cmd->complete_func = _f8_ce_out_call_back;

	rc = _qce_start_dma(pce_dev, true, true);
	if (rc == 0)
		return 0;
bad:
	if (pce_dev->phy_ota_dst)
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst, total,
				DMA_FROM_DEVICE);
	dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src, total,
				(req->data_in == req->data_out) ?
				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	return rc;
}
EXPORT_SYMBOL(qce_f8_multi_pkt_req);
2556
/*
 * qce_f9_req() - issue an OTA f9 (integrity/MAC) request.
 * @handle:	qce device handle returned by qce_open()
 * @req:	f9 request descriptor; req->message / req->msize is the
 *		data to authenticate (see linux/qcota.h)
 * @cookie:	opaque caller context handed back through @qce_cb
 * @qce_cb:	completion callback
 *
 * Maps the message for DMA, chains it onto the CE input command list
 * (padded to a multiple of ADM_CE_BLOCK_SIZE), programs the f9 engine
 * and starts the input channel only -- like the hash path, no output
 * DMA channel is used.
 *
 * Return: 0 on successful submission, negative errno on failure; the
 * message mapping is released on failure.
 */
int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie,
			qce_comp_func_ptr_t qce_cb)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	int rc;
	/* pad the transfer up to a whole number of 16-byte ADM blocks */
	uint32_t pad_len = ALIGN(req->msize, ADM_CE_BLOCK_SIZE) - req->msize;

	pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev, req->message,
				req->msize, DMA_TO_DEVICE);

	_chain_buffer_in_init(pce_dev);
	rc = _chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src, req->msize);
	if (rc < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	pce_dev->ota_size = req->msize;
	if (pad_len) {
		rc = _chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
				pad_len);
		if (rc < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}
	/* 2: value used for hash-style transfers when closing the list */
	_ce_in_final(pce_dev, 2, req->msize + pad_len);
	rc = _ce_f9_setup(pce_dev, req);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = cookie;
	pce_dev->qce_cb = qce_cb;

	pce_dev->chan_ce_in_cmd->complete_func = _f9_ce_in_call_back;

	rc = _qce_start_dma(pce_dev, true, false);
	if (rc == 0)
		return 0;
bad:
	dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
				req->msize, DMA_TO_DEVICE);
	return rc;
}
EXPORT_SYMBOL(qce_f9_req);
2603
/* Module metadata (visible via modinfo) */
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
MODULE_DESCRIPTION("Crypto Engine driver");
MODULE_VERSION("1.13");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002608