/* Qualcomm Crypto Engine driver.
 *
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/qcedev.h>
#include <linux/qcota.h>
#include <mach/dma.h>

#include "qce.h"
#include "qcryptohw_30.h"
#include "qce_ota.h"

/* ADM definitions */
#define LI_SG_CMD	(1 << 31)	/* last index in the scatter gather cmd */
#define SRC_INDEX_SG_CMD(index)	((index & 0x3fff) << 16)
#define DST_INDEX_SG_CMD(index)	(index & 0x3fff)
#define ADM_DESC_LAST	(1 << 31)

/* Data xfer between DM and CE in blocks of 16 bytes */
#define ADM_CE_BLOCK_SIZE	16

#define QCE_FIFO_SIZE	0x8000

/* Data xfer between DM and CE in blocks of 64 bytes */
#define ADM_SHA_BLOCK_SIZE	64

#define ADM_DESC_LENGTH_MASK	0xffff
#define ADM_DESC_LENGTH(x)	(x & ADM_DESC_LENGTH_MASK)

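/*
 * Each ADM scatter/gather entry is a 32-bit bus address plus a length
 * word: the low 16 bits of 'len' carry the byte count and the upper
 * bits carry ADM flags such as ADM_DESC_LAST (see the masks above).
 */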
struct dmov_desc {
	uint32_t addr;
	uint32_t len;
};

#define ADM_STATUS_OK 0x80000002

/* Misc definitions */

/* QCE max number of descriptors in a descriptor list */
#define QCE_MAX_NUM_DESC 128

/* State of DM channel */
enum qce_chan_st_enum {
	QCE_CHAN_STATE_IDLE = 0,
	QCE_CHAN_STATE_IN_PROG = 1,
	QCE_CHAN_STATE_COMP = 2,
	QCE_CHAN_STATE_LAST
};

/*
 * CE HW device structure.
 * Each engine has an instance of the structure.
 * Each engine can only handle one crypto operation at a time. It is up to
 * the software above to ensure single-threaded operation on an engine.
 */
struct qce_device {
	struct device *pdev;		/* Handle to platform_device structure */
	unsigned char *coh_vmem;	/* Allocated coherent virtual memory */
	dma_addr_t coh_pmem;		/* Allocated coherent physical memory */
	void __iomem *iobase;		/* Virtual io base of CE HW */
	unsigned int phy_iobase;	/* Physical io base of CE HW */
	struct clk *ce_clk;		/* Handle to CE clk */
	unsigned int crci_in;		/* CRCI for CE DM IN Channel */
	unsigned int crci_out;		/* CRCI for CE DM OUT Channel */
	unsigned int crci_hash;		/* CRCI for CE HASH */
	unsigned int chan_ce_in;	/* ADM channel used for CE input
					 * and auth result if authentication
					 * only operation. */
	unsigned int chan_ce_out;	/* ADM channel used for CE output,
					 * and icv for esp */

	unsigned int *cmd_pointer_list_ce_in;
	dma_addr_t phy_cmd_pointer_list_ce_in;

	unsigned int *cmd_pointer_list_ce_out;
	dma_addr_t phy_cmd_pointer_list_ce_out;

	unsigned char *cmd_list_ce_in;
	dma_addr_t phy_cmd_list_ce_in;

	unsigned char *cmd_list_ce_out;
	dma_addr_t phy_cmd_list_ce_out;

	struct dmov_desc *ce_out_src_desc;
	dma_addr_t phy_ce_out_src_desc;

	struct dmov_desc *ce_out_dst_desc;
	dma_addr_t phy_ce_out_dst_desc;

	struct dmov_desc *ce_in_src_desc;
	dma_addr_t phy_ce_in_src_desc;

	struct dmov_desc *ce_in_dst_desc;
	dma_addr_t phy_ce_in_dst_desc;

	unsigned char *ce_out_ignore;
	dma_addr_t phy_ce_out_ignore;

	unsigned char *ce_pad;
	dma_addr_t phy_ce_pad;

	struct msm_dmov_cmd *chan_ce_in_cmd;
	struct msm_dmov_cmd *chan_ce_out_cmd;

	uint32_t ce_out_ignore_size;

	int ce_out_dst_desc_index;
	int ce_in_dst_desc_index;

	int ce_out_src_desc_index;
	int ce_in_src_desc_index;

	enum qce_chan_st_enum chan_ce_in_state;		/* chan ce_in state */
	enum qce_chan_st_enum chan_ce_out_state;	/* chan ce_out state */

	int chan_ce_in_status;		/* chan ce_in status */
	int chan_ce_out_status;		/* chan ce_out status */

	unsigned char *dig_result;
	dma_addr_t phy_dig_result;

	/* cached aes key */
	uint32_t aeskey[AES256_KEY_SIZE/sizeof(uint32_t)];

	uint32_t aes_key_size;		/* cached aes key size in bytes */
	int fastaes;			/* ce supports fast aes */
	int hmac;			/* ce support hmac-sha1 */
	bool ota;			/* ce support ota */

	qce_comp_func_ptr_t qce_cb;	/* qce callback function pointer */

	int assoc_nents;
	int src_nents;
	int dst_nents;

	void *areq;
	enum qce_cipher_mode_enum mode;

	dma_addr_t phy_iv_in;
	dma_addr_t phy_ota_src;
	dma_addr_t phy_ota_dst;
	unsigned int ota_size;
	int err;
};

/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
static uint32_t _std_init_vector_sha1[] = {
	0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
};
/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint32_t _std_init_vector_sha256[] = {
	0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
	0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
};

/* Source: FIPS 197, Figure 7. S-box: substitution values for the byte xy */
static const uint32_t _s_box[256] = {
	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,

	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,

	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,

	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,

	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,

	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,

	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,

	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,

	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,

	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,

	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,

	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,

	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,

	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,

	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,

	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 };

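/*
 * Software key expansion, used in _ce_setup() below when the engine
 * does not provide the fast-AES block and the round keys must be
 * written out explicitly. 'keysize' is the key length in bits
 * (128/192/256); AES_KEY holds the key as big-endian words and
 * AES_RND_KEY receives 4*(Nr+1) round-key words.
 */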
/*
 * Source: FIPS 197, Sec 5.2 Key Expansion, Figure 11. Pseudo Code for Key
 * Expansion.
 */
static void _aes_expand_key_schedule(uint32_t keysize, uint32_t *AES_KEY,
		uint32_t *AES_RND_KEY)
{
	uint32_t i;
	uint32_t Nk;
	uint32_t Nr, rot_data;
	uint32_t Rcon = 0x01000000;
	uint32_t temp;
	uint32_t data_in;
	uint32_t MSB_store;
	uint32_t byte_for_sub;
	uint32_t word_sub[4];

	switch (keysize) {
	case 192:
		Nk = 6;
		Nr = 12;
		break;

	case 256:
		Nk = 8;
		Nr = 14;
		break;

	case 128:
	default:	/* default to AES128 */
		Nk = 4;
		Nr = 10;
		break;
	}

	/* key expansion */
	i = 0;
	while (i < Nk) {
		AES_RND_KEY[i] = AES_KEY[i];
		i = i + 1;
	}

	i = Nk;
	while (i < (4 * (Nr + 1))) {
		temp = AES_RND_KEY[i-1];
		if (Nr == 14) {
			switch (i) {
			case 8:
				Rcon = 0x01000000;
				break;

			case 16:
				Rcon = 0x02000000;
				break;

			case 24:
				Rcon = 0x04000000;
				break;

			case 32:
				Rcon = 0x08000000;
				break;

			case 40:
				Rcon = 0x10000000;
				break;

			case 48:
				Rcon = 0x20000000;
				break;

			case 56:
				Rcon = 0x40000000;
				break;
			}
		} else if (Nr == 12) {
			switch (i) {
			case 6:
				Rcon = 0x01000000;
				break;

			case 12:
				Rcon = 0x02000000;
				break;

			case 18:
				Rcon = 0x04000000;
				break;

			case 24:
				Rcon = 0x08000000;
				break;

			case 30:
				Rcon = 0x10000000;
				break;

			case 36:
				Rcon = 0x20000000;
				break;

			case 42:
				Rcon = 0x40000000;
				break;

			case 48:
				Rcon = 0x80000000;
				break;
			}
		} else if (Nr == 10) {
			switch (i) {
			case 4:
				Rcon = 0x01000000;
				break;

			case 8:
				Rcon = 0x02000000;
				break;

			case 12:
				Rcon = 0x04000000;
				break;

			case 16:
				Rcon = 0x08000000;
				break;

			case 20:
				Rcon = 0x10000000;
				break;

			case 24:
				Rcon = 0x20000000;
				break;

			case 28:
				Rcon = 0x40000000;
				break;

			case 32:
				Rcon = 0x80000000;
				break;

			case 36:
				Rcon = 0x1b000000;
				break;

			case 40:
				Rcon = 0x36000000;
				break;
			}
		}

		if ((i % Nk) == 0) {
			data_in = temp;
			MSB_store = (data_in >> 24 & 0xff);
			rot_data = (data_in << 8) | MSB_store;
			byte_for_sub = rot_data;
			word_sub[0] = _s_box[(byte_for_sub & 0xff)];
			word_sub[1] = (_s_box[((byte_for_sub & 0xff00) >> 8)]
					<< 8);
			word_sub[2] = (_s_box[((byte_for_sub & 0xff0000) >> 16)]
					<< 16);
			word_sub[3] = (_s_box[((byte_for_sub & 0xff000000)
					>> 24)] << 24);
			word_sub[0] = word_sub[0] | word_sub[1] | word_sub[2] |
					word_sub[3];
			temp = word_sub[0] ^ Rcon;
		} else if ((Nk > 6) && ((i % Nk) == 4)) {
			byte_for_sub = temp;
			word_sub[0] = _s_box[(byte_for_sub & 0xff)];
			word_sub[1] = (_s_box[((byte_for_sub & 0xff00) >> 8)]
					<< 8);
			word_sub[2] = (_s_box[((byte_for_sub & 0xff0000) >> 16)]
					<< 16);
			word_sub[3] = (_s_box[((byte_for_sub & 0xff000000) >>
					24)] << 24);
			word_sub[0] = word_sub[0] | word_sub[1] | word_sub[2] |
					word_sub[3];
			temp = word_sub[0];
		}

		AES_RND_KEY[i] = AES_RND_KEY[i-Nk] ^ temp;
		i = i + 1;
	}
}

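/*
 * Pack a byte stream into big-endian ("network order") 32-bit words,
 * e.g. bytes 01 02 03 04 become the word 0x01020304. A 1-3 byte tail
 * is left-justified in the final word with the unused low bytes zero.
 */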
static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned n;

	n = len / sizeof(uint32_t);
	for (; n > 0; n--) {
		*iv = ((*b << 24) & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000) |
				(((*(b+2)) << 8) & 0xff00) |
				(*(b+3) & 0xff);
		b += sizeof(uint32_t);
		iv++;
	}

	n = len % sizeof(uint32_t);
	if (n == 3) {
		*iv = ((*b << 24) & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000) |
				(((*(b+2)) << 8) & 0xff00);
	} else if (n == 2) {
		*iv = ((*b << 24) & 0xff000000) |
				(((*(b+1)) << 16) & 0xff0000);
	} else if (n == 1) {
		*iv = ((*b << 24) & 0xff000000);
	}
}

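/* Inverse of _byte_stream_to_net_words(): unpack big-endian words to bytes. */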
static void _net_words_to_byte_stream(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned n = len / sizeof(uint32_t);

	for (; n > 0; n--) {
		*b++ = (unsigned char) ((*iv >> 24) & 0xff);
		*b++ = (unsigned char) ((*iv >> 16) & 0xff);
		*b++ = (unsigned char) ((*iv >> 8) & 0xff);
		*b++ = (unsigned char) (*iv & 0xff);
		iv++;
	}
	n = len % sizeof(uint32_t);
	if (n == 3) {
		*b++ = (unsigned char) ((*iv >> 24) & 0xff);
		*b++ = (unsigned char) ((*iv >> 16) & 0xff);
		*b = (unsigned char) ((*iv >> 8) & 0xff);
	} else if (n == 2) {
		*b++ = (unsigned char) ((*iv >> 24) & 0xff);
		*b = (unsigned char) ((*iv >> 16) & 0xff);
	} else if (n == 1) {
		*b = (unsigned char) ((*iv >> 24) & 0xff);
	}
}

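/* Number of scatterlist entries needed to cover nbytes of data. */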
static int count_sg(struct scatterlist *sg, int nbytes)
{
	int i;

	for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
		nbytes -= sg->length;
	return i;
}

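/*
 * For pmem buffers the caller supplies physical offsets directly, so
 * "mapping" just copies each buf_info offset into the scatterlist's
 * dma_address; presumably the buffers are physically contiguous and
 * no cache maintenance is needed here.
 */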
static int dma_map_pmem_sg(struct buf_info *pmem, unsigned entries,
		struct scatterlist *sg)
{
	int i = 0;

	for (i = 0; i < entries; i++) {
		sg->dma_address = (dma_addr_t)pmem->offset;
		sg++;
		pmem++;
	}
	return 0;
}

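/*
 * Identify the crypto core: check the status register signature, decode
 * the core revision, and latch which engines (fast AES, HMAC-SHA1,
 * OTA f8/f9) this instance provides.
 */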
static int _probe_ce_engine(struct qce_device *pce_dev)
{
	unsigned int val;
	unsigned int rev;
	unsigned int eng_availability;	/* engine available functions */

	val = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if ((val & 0xfffffff) != 0x0200004) {
		dev_err(pce_dev->pdev,
				"unknown Qualcomm crypto device at 0x%x 0x%x\n",
				pce_dev->phy_iobase, val);
		return -EIO;
	}
	rev = (val & CRYPTO_CORE_REV_MASK) >> CRYPTO_CORE_REV;
	if (rev == 0x2) {
		dev_info(pce_dev->pdev,
				"Qualcomm Crypto 3e device found at 0x%x\n",
				pce_dev->phy_iobase);
	} else if (rev == 0x1) {
		dev_info(pce_dev->pdev,
				"Qualcomm Crypto 3 device found at 0x%x\n",
				pce_dev->phy_iobase);
	} else if (rev == 0x0) {
		dev_info(pce_dev->pdev,
				"Qualcomm Crypto 2 device found at 0x%x\n",
				pce_dev->phy_iobase);
	} else {
		dev_err(pce_dev->pdev,
				"unknown Qualcomm crypto device at 0x%x\n",
				pce_dev->phy_iobase);
		return -EIO;
	}

	eng_availability = readl_relaxed(pce_dev->iobase +
			CRYPTO_ENGINES_AVAIL);

	if (((eng_availability & CRYPTO_AES_SEL_MASK) >> CRYPTO_AES_SEL)
			== CRYPTO_AES_SEL_FAST)
		pce_dev->fastaes = 1;
	else
		pce_dev->fastaes = 0;

	if (eng_availability & (1 << CRYPTO_HMAC_SEL))
		pce_dev->hmac = 1;
	else
		pce_dev->hmac = 0;

	if ((eng_availability & (1 << CRYPTO_F9_SEL)) &&
			(eng_availability & (1 << CRYPTO_F8_SEL)))
		pce_dev->ota = true;
	else
		pce_dev->ota = false;

	pce_dev->aes_key_size = 0;

	return 0;
}

static int _init_ce_engine(struct qce_device *pce_dev)
{
	unsigned int val;

	/* reset qce */
	writel_relaxed(1 << CRYPTO_SW_RST, pce_dev->iobase + CRYPTO_CONFIG_REG);

	/* Ensure the previous write (to the reset bit) has completed. */
	mb();

	/* configure ce */
	val = (1 << CRYPTO_MASK_DOUT_INTR) | (1 << CRYPTO_MASK_DIN_INTR) |
			(1 << CRYPTO_MASK_AUTH_DONE_INTR) |
			(1 << CRYPTO_MASK_ERR_INTR);
	writel_relaxed(val, pce_dev->iobase + CRYPTO_CONFIG_REG);

	if (_probe_ce_engine(pce_dev) < 0)
		return -EIO;
	if (readl_relaxed(pce_dev->iobase + CRYPTO_CONFIG_REG) != val) {
		dev_err(pce_dev->pdev,
				"unknown Qualcomm crypto device at 0x%x\n",
				pce_dev->phy_iobase);
		return -EIO;
	}
	return 0;
}

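/*
 * Program the engine for one SHA-1/SHA-256 pass: load the standard IV
 * on the first block or the running digest otherwise, restore the byte
 * counts, set the segment size/config registers, and kick CRYPTO_GOPROC.
 */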
static int _sha_ce_setup(struct qce_device *pce_dev, struct qce_sha_req *sreq)
{
	uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
	uint32_t diglen;
	int rc;
	int i;
	uint32_t cfg = 0;

	/* if not the last, the size has to be on the block boundary */
	if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
		return -EIO;

	switch (sreq->alg) {
	case QCE_HASH_SHA1:
		diglen = SHA1_DIGEST_SIZE;
		break;
	case QCE_HASH_SHA256:
		diglen = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * write 20/32 bytes, 5/8 words into auth_iv
	 * for SHA1/SHA256
	 */
	if (sreq->first_blk) {
		if (sreq->alg == QCE_HASH_SHA1) {
			for (i = 0; i < 5; i++)
				auth32[i] = _std_init_vector_sha1[i];
		} else {
			for (i = 0; i < 8; i++)
				auth32[i] = _std_init_vector_sha256[i];
		}
	} else
		_byte_stream_to_net_words(auth32, sreq->digest, diglen);

	rc = clk_enable(pce_dev->ce_clk);
	if (rc)
		return rc;

	writel_relaxed(auth32[0], pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
	writel_relaxed(auth32[1], pce_dev->iobase + CRYPTO_AUTH_IV1_REG);
	writel_relaxed(auth32[2], pce_dev->iobase + CRYPTO_AUTH_IV2_REG);
	writel_relaxed(auth32[3], pce_dev->iobase + CRYPTO_AUTH_IV3_REG);
	writel_relaxed(auth32[4], pce_dev->iobase + CRYPTO_AUTH_IV4_REG);

	if (sreq->alg == QCE_HASH_SHA256) {
		writel_relaxed(auth32[5], pce_dev->iobase +
				CRYPTO_AUTH_IV5_REG);
		writel_relaxed(auth32[6], pce_dev->iobase +
				CRYPTO_AUTH_IV6_REG);
		writel_relaxed(auth32[7], pce_dev->iobase +
				CRYPTO_AUTH_IV7_REG);
	}

	/* write auth_bytecnt 0/1, start with 0 */
	writel_relaxed(sreq->auth_data[0], pce_dev->iobase +
			CRYPTO_AUTH_BYTECNT0_REG);
	writel_relaxed(sreq->auth_data[1], pce_dev->iobase +
			CRYPTO_AUTH_BYTECNT1_REG);

	/* write auth_seg_cfg */
	writel_relaxed(sreq->size << CRYPTO_AUTH_SEG_SIZE,
			pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);

	/* write seg_cfg */
	if (sreq->alg == QCE_HASH_SHA1)
		cfg |= (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE);
	else
		cfg |= (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE);

	if (sreq->first_blk)
		cfg |= 1 << CRYPTO_FIRST;
	if (sreq->last_blk)
		cfg |= 1 << CRYPTO_LAST;
	cfg |= CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG;
	writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);

	/* write seg_size */
	writel_relaxed(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	/* issue go to crypto */
	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);

	/*
	 * Ensure the previous write (setting the GO register) has completed
	 * before issuing a DMA transfer request.
	 */
	mb();

	return 0;
}

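/*
 * Program the engine for a cipher or AEAD request: optional HMAC-SHA1
 * auth setup, IV and key loading per algorithm (DES/3DES/AES), AES key
 * caching with software key expansion on the slow-AES path, then the
 * segment registers and CRYPTO_GOPROC.
 */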
static int _ce_setup(struct qce_device *pce_dev, struct qce_req *q_req,
		uint32_t totallen, uint32_t coffset)
{
	uint32_t hmackey[HMAC_KEY_SIZE/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0};
	uint32_t enckey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0, 0, 0, 0};
	uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
			0, 0, 0, 0};
	uint32_t enck_size_in_word = q_req->encklen / sizeof(uint32_t);
	int aes_key_chg;
	int i, rc;
	uint32_t aes_round_key[CRYPTO_AES_RNDKEYS];
	uint32_t cfg;
	uint32_t ivsize = q_req->ivsize;

	rc = clk_enable(pce_dev->ce_clk);
	if (rc)
		return rc;

	cfg = (1 << CRYPTO_FIRST) | (1 << CRYPTO_LAST);
	if (q_req->op == QCE_REQ_AEAD) {

		/* do authentication setup */
		cfg |= (CRYPTO_AUTH_SIZE_HMAC_SHA1 << CRYPTO_AUTH_SIZE) |
				(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG);

		/* write sha1 init vector */
		writel_relaxed(_std_init_vector_sha1[0],
				pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
		writel_relaxed(_std_init_vector_sha1[1],
				pce_dev->iobase + CRYPTO_AUTH_IV1_REG);
		writel_relaxed(_std_init_vector_sha1[2],
				pce_dev->iobase + CRYPTO_AUTH_IV2_REG);
		writel_relaxed(_std_init_vector_sha1[3],
				pce_dev->iobase + CRYPTO_AUTH_IV3_REG);
		writel_relaxed(_std_init_vector_sha1[4],
				pce_dev->iobase + CRYPTO_AUTH_IV4_REG);

		/* write hmac key */
		_byte_stream_to_net_words(hmackey, q_req->authkey,
				q_req->authklen);
		writel_relaxed(hmackey[0], pce_dev->iobase +
				CRYPTO_AUTH_IV5_REG);
		writel_relaxed(hmackey[1], pce_dev->iobase +
				CRYPTO_AUTH_IV6_REG);
		writel_relaxed(hmackey[2], pce_dev->iobase +
				CRYPTO_AUTH_IV7_REG);
		writel_relaxed(hmackey[3], pce_dev->iobase +
				CRYPTO_AUTH_IV8_REG);
		writel_relaxed(hmackey[4], pce_dev->iobase +
				CRYPTO_AUTH_IV9_REG);
		writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG);
		writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT1_REG);

		/* write auth_seg_cfg */
		writel_relaxed((totallen << CRYPTO_AUTH_SEG_SIZE) & 0xffff0000,
				pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
	}

	_byte_stream_to_net_words(enckey32, q_req->enckey, q_req->encklen);

	switch (q_req->mode) {
	case QCE_MODE_ECB:
		cfg |= (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
		break;

	case QCE_MODE_CBC:
		cfg |= (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
		break;

	case QCE_MODE_CTR:
	default:
		cfg |= (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
		break;
	}
	pce_dev->mode = q_req->mode;

	switch (q_req->alg) {
	case CIPHER_ALG_DES:
		if (q_req->mode != QCE_MODE_ECB) {
			_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
			writel_relaxed(enciv32[0], pce_dev->iobase +
					CRYPTO_CNTR0_IV0_REG);
			writel_relaxed(enciv32[1], pce_dev->iobase +
					CRYPTO_CNTR1_IV1_REG);
		}
		writel_relaxed(enckey32[0], pce_dev->iobase +
				CRYPTO_DES_KEY0_REG);
		writel_relaxed(enckey32[1], pce_dev->iobase +
				CRYPTO_DES_KEY1_REG);
		cfg |= ((CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
				(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
		break;

	case CIPHER_ALG_3DES:
		if (q_req->mode != QCE_MODE_ECB) {
			_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
			writel_relaxed(enciv32[0], pce_dev->iobase +
					CRYPTO_CNTR0_IV0_REG);
			writel_relaxed(enciv32[1], pce_dev->iobase +
					CRYPTO_CNTR1_IV1_REG);
		}
		writel_relaxed(enckey32[0], pce_dev->iobase +
				CRYPTO_DES_KEY0_REG);
		writel_relaxed(enckey32[1], pce_dev->iobase +
				CRYPTO_DES_KEY1_REG);
		writel_relaxed(enckey32[2], pce_dev->iobase +
				CRYPTO_DES_KEY2_REG);
		writel_relaxed(enckey32[3], pce_dev->iobase +
				CRYPTO_DES_KEY3_REG);
		writel_relaxed(enckey32[4], pce_dev->iobase +
				CRYPTO_DES_KEY4_REG);
		writel_relaxed(enckey32[5], pce_dev->iobase +
				CRYPTO_DES_KEY5_REG);
		cfg |= ((CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
				(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
		break;

	case CIPHER_ALG_AES:
	default:
		if (q_req->mode != QCE_MODE_ECB) {
			_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
			writel_relaxed(enciv32[0], pce_dev->iobase +
					CRYPTO_CNTR0_IV0_REG);
			writel_relaxed(enciv32[1], pce_dev->iobase +
					CRYPTO_CNTR1_IV1_REG);
			writel_relaxed(enciv32[2], pce_dev->iobase +
					CRYPTO_CNTR2_IV2_REG);
			writel_relaxed(enciv32[3], pce_dev->iobase +
					CRYPTO_CNTR3_IV3_REG);
		}
		/* set number of counter bits */
		writel_relaxed(0xffff, pce_dev->iobase + CRYPTO_CNTR_MASK_REG);

		if (q_req->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
			cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
					CRYPTO_ENCR_KEY_SZ);
			cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
		} else {
			switch (q_req->encklen) {
			case AES128_KEY_SIZE:
				cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
						CRYPTO_ENCR_KEY_SZ);
				break;
			case AES192_KEY_SIZE:
				cfg |= (CRYPTO_ENCR_KEY_SZ_AES192 <<
						CRYPTO_ENCR_KEY_SZ);
				break;
			case AES256_KEY_SIZE:
			default:
				cfg |= (CRYPTO_ENCR_KEY_SZ_AES256 <<
						CRYPTO_ENCR_KEY_SZ);

				/* check for null key. If null, use hw key */
				for (i = 0; i < enck_size_in_word; i++) {
					if (enckey32[i] != 0)
						break;
				}
				if (i == enck_size_in_word)
					cfg |= 1 << CRYPTO_USE_HW_KEY;
				break;
			} /* end of switch (q_req->encklen) */

			cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
			if (pce_dev->aes_key_size != q_req->encklen)
				aes_key_chg = 1;
			else {
				for (i = 0; i < enck_size_in_word; i++) {
					if (enckey32[i] != pce_dev->aeskey[i])
						break;
				}
				aes_key_chg = (i == enck_size_in_word) ? 0 : 1;
			}

			if (aes_key_chg) {
				if (pce_dev->fastaes) {
					for (i = 0; i < enck_size_in_word;
							i++) {
						writel_relaxed(enckey32[i],
							pce_dev->iobase +
							CRYPTO_AES_RNDKEY0 +
							(i * sizeof(uint32_t)));
					}
				} else {
					/* size in bits */
					_aes_expand_key_schedule(
							q_req->encklen * 8,
							enckey32,
							aes_round_key);

					for (i = 0; i < CRYPTO_AES_RNDKEYS;
							i++) {
						writel_relaxed(aes_round_key[i],
							pce_dev->iobase +
							CRYPTO_AES_RNDKEY0 +
							(i * sizeof(uint32_t)));
					}
				}

				pce_dev->aes_key_size = q_req->encklen;
				for (i = 0; i < enck_size_in_word; i++)
					pce_dev->aeskey[i] = enckey32[i];
			} /* if (aes_key_chg) */
		} /* else of if (q_req->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
		break;
	} /* end of switch (q_req->alg) */

	if (q_req->dir == QCE_ENCRYPT)
		cfg |= (1 << CRYPTO_AUTH_POS);
	cfg |= ((q_req->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;

	/* write encr seg cfg */
	writel_relaxed((q_req->cryptlen << CRYPTO_ENCR_SEG_SIZE) |
			(coffset & 0xffff),	/* cipher offset */
			pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);

	/* write seg cfg and size */
	writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);
	writel_relaxed(totallen, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	/* issue go to crypto */
	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);

	/*
	 * Ensure the previous write (setting the GO register) has completed
	 * before issuing a DMA transfer request.
	 */
	mb();
	return 0;
}

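/*
 * The completion handlers below run from the ADM callbacks: they unmap
 * DMA buffers, check CRYPTO_STATUS_REG for CRYPTO_SW_ERR (re-initializing
 * the engine on failure), read back the counter/IV registers for non-ECB
 * modes, and hand the result to the registered qce callback.
 */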
static int _aead_complete(struct qce_device *pce_dev)
{
	struct aead_request *areq;
	struct crypto_aead *aead;
	uint32_t ivsize;
	uint32_t iv_out[4];
	unsigned char iv[4 * sizeof(uint32_t)];
	uint32_t status;

	areq = (struct aead_request *) pce_dev->areq;
	aead = crypto_aead_reqtfm(areq);
	ivsize = crypto_aead_ivsize(aead);

	if (areq->src != areq->dst) {
		dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
				DMA_FROM_DEVICE);
	}
	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
						DMA_TO_DEVICE);
	dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
			ivsize, DMA_TO_DEVICE);
	dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
			DMA_TO_DEVICE);

	/* check ce error status */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (status & (1 << CRYPTO_SW_ERR)) {
		pce_dev->err++;
		dev_err(pce_dev->pdev,
				"Qualcomm Crypto Error at 0x%x, status %x\n",
				pce_dev->phy_iobase, status);
		_init_ce_engine(pce_dev);
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, pce_dev->dig_result, NULL, -ENXIO);
		return 0;
	}

	/* get iv out */
	if (pce_dev->mode == QCE_MODE_ECB) {
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, pce_dev->dig_result, NULL,
				pce_dev->chan_ce_in_status |
				pce_dev->chan_ce_out_status);
	} else {
		iv_out[0] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR0_IV0_REG);
		iv_out[1] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR1_IV1_REG);
		iv_out[2] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR2_IV2_REG);
		iv_out[3] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR3_IV3_REG);

		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, pce_dev->dig_result, iv,
				pce_dev->chan_ce_in_status |
				pce_dev->chan_ce_out_status);
	}
	return 0;
}

static void _sha_complete(struct qce_device *pce_dev)
{
	struct ahash_request *areq;
	uint32_t auth_data[2];
	uint32_t status;

	areq = (struct ahash_request *) pce_dev->areq;
	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			DMA_TO_DEVICE);

	/* check ce error status */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (status & (1 << CRYPTO_SW_ERR)) {
		pce_dev->err++;
		dev_err(pce_dev->pdev,
				"Qualcomm Crypto Error at 0x%x, status %x\n",
				pce_dev->phy_iobase, status);
		_init_ce_engine(pce_dev);
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, pce_dev->dig_result, NULL, -ENXIO);
		return;
	}

	auth_data[0] = readl_relaxed(pce_dev->iobase +
			CRYPTO_AUTH_BYTECNT0_REG);
	auth_data[1] = readl_relaxed(pce_dev->iobase +
			CRYPTO_AUTH_BYTECNT1_REG);

	/*
	 * Ensure the previous reads (retrieving the byte count) have
	 * completed before disabling the clk.
	 */
	mb();
	clk_disable(pce_dev->ce_clk);
	pce_dev->qce_cb(areq, pce_dev->dig_result, (unsigned char *)auth_data,
			pce_dev->chan_ce_in_status);
}

static int _ablk_cipher_complete(struct qce_device *pce_dev)
{
	struct ablkcipher_request *areq;
	uint32_t iv_out[4];
	unsigned char iv[4 * sizeof(uint32_t)];
	uint32_t status;

	areq = (struct ablkcipher_request *) pce_dev->areq;

	if (areq->src != areq->dst) {
		dma_unmap_sg(pce_dev->pdev, areq->dst,
				pce_dev->dst_nents, DMA_FROM_DEVICE);
	}
	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
						DMA_TO_DEVICE);

	/* check ce error status */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (status & (1 << CRYPTO_SW_ERR)) {
		pce_dev->err++;
		dev_err(pce_dev->pdev,
				"Qualcomm Crypto Error at 0x%x, status %x\n",
				pce_dev->phy_iobase, status);
		_init_ce_engine(pce_dev);
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, NULL, NULL, -ENXIO);
		return 0;
	}

	/* get iv out */
	if (pce_dev->mode == QCE_MODE_ECB) {
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status |
				pce_dev->chan_ce_out_status);
	} else {
		iv_out[0] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR0_IV0_REG);
		iv_out[1] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR1_IV1_REG);
		iv_out[2] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR2_IV2_REG);
		iv_out[3] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR3_IV3_REG);

		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status |
				pce_dev->chan_ce_out_status);
	}

	return 0;
}

static int _ablk_cipher_use_pmem_complete(struct qce_device *pce_dev)
{
	struct ablkcipher_request *areq;
	uint32_t iv_out[4];
	unsigned char iv[4 * sizeof(uint32_t)];
	uint32_t status;

	areq = (struct ablkcipher_request *) pce_dev->areq;

	/* check ce error status */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (status & (1 << CRYPTO_SW_ERR)) {
		pce_dev->err++;
		dev_err(pce_dev->pdev,
				"Qualcomm Crypto Error at 0x%x, status %x\n",
				pce_dev->phy_iobase, status);
		_init_ce_engine(pce_dev);
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, NULL, NULL, -ENXIO);
		return 0;
	}

	/* get iv out */
	if (pce_dev->mode == QCE_MODE_ECB) {
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status |
				pce_dev->chan_ce_out_status);
	} else {
		iv_out[0] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR0_IV0_REG);
		iv_out[1] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR1_IV1_REG);
		iv_out[2] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR2_IV2_REG);
		iv_out[3] = readl_relaxed(pce_dev->iobase +
				CRYPTO_CNTR3_IV3_REG);

		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status |
				pce_dev->chan_ce_out_status);
	}

	return 0;
}

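/*
 * An ADM descriptor may move at most QCE_FIFO_SIZE (32 KB) per entry,
 * so larger transfers are split across consecutive descriptors here.
 * A paddr of 0 means "keep the address already in each descriptor",
 * used for the device-FIFO side where every entry points at the
 * CRYPTO_DATA_SHADOW registers (see _setup_cmd_template()).
 */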
static int qce_split_and_insert_dm_desc(struct dmov_desc *pdesc,
		unsigned int plen, unsigned int paddr, int *index)
{
	while (plen > QCE_FIFO_SIZE) {
		pdesc->len = QCE_FIFO_SIZE;
		if (paddr > 0) {
			pdesc->addr = paddr;
			paddr += QCE_FIFO_SIZE;
		}
		plen -= pdesc->len;
		if (plen > 0) {
			*index = (*index) + 1;
			if ((*index) >= QCE_MAX_NUM_DESC)
				return -ENOMEM;
			pdesc++;
		}
	}
	if ((plen > 0) && (plen <= QCE_FIFO_SIZE)) {
		pdesc->len = plen;
		if (paddr > 0)
			pdesc->addr = paddr;
	}

	return 0;
}

static int _chain_sg_buffer_in(struct qce_device *pce_dev,
		struct scatterlist *sg, unsigned int nbytes)
{
	unsigned int len;
	unsigned int dlen;
	struct dmov_desc *pdesc;

	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
	/*
	 * Two consecutive chunks may be handled by the old
	 * buffer descriptor.
	 */
	while (nbytes > 0) {
		len = min(nbytes, sg_dma_len(sg));
		dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
		nbytes -= len;
		if (dlen == 0) {
			pdesc->addr = sg_dma_address(sg);
			pdesc->len = len;
			if (pdesc->len > QCE_FIFO_SIZE) {
				if (qce_split_and_insert_dm_desc(pdesc,
						pdesc->len, sg_dma_address(sg),
						&pce_dev->ce_in_src_desc_index))
					return -EIO;
			}
		} else if (sg_dma_address(sg) == (pdesc->addr + dlen)) {
			pdesc->len = dlen + len;
			if (pdesc->len > QCE_FIFO_SIZE) {
				if (qce_split_and_insert_dm_desc(pdesc,
						pdesc->len, pdesc->addr,
						&pce_dev->ce_in_src_desc_index))
					return -EIO;
			}
		} else {
			pce_dev->ce_in_src_desc_index++;
			if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
				return -ENOMEM;
			pdesc++;
			pdesc->len = len;
			pdesc->addr = sg_dma_address(sg);
			if (pdesc->len > QCE_FIFO_SIZE) {
				if (qce_split_and_insert_dm_desc(pdesc,
						pdesc->len, sg_dma_address(sg),
						&pce_dev->ce_in_src_desc_index))
					return -EIO;
			}
		}
		if (nbytes > 0)
			sg = sg_next(sg);
	}
	return 0;
}

static int _chain_pm_buffer_in(struct qce_device *pce_dev,
		unsigned int pmem, unsigned int nbytes)
{
	unsigned int dlen;
	struct dmov_desc *pdesc;

	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
	dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
	if (dlen == 0) {
		pdesc->addr = pmem;
		pdesc->len = nbytes;
	} else if (pmem == (pdesc->addr + dlen)) {
		pdesc->len = dlen + nbytes;
	} else {
		pce_dev->ce_in_src_desc_index++;
		if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
			return -ENOMEM;
		pdesc++;
		pdesc->len = nbytes;
		pdesc->addr = pmem;
	}
	return 0;
}

static void _chain_buffer_in_init(struct qce_device *pce_dev)
{
	struct dmov_desc *pdesc;

	pce_dev->ce_in_src_desc_index = 0;
	pce_dev->ce_in_dst_desc_index = 0;
	pdesc = pce_dev->ce_in_src_desc;
	pdesc->len = 0;
}

static void _ce_in_final(struct qce_device *pce_dev, int ncmd, unsigned total)
{
	struct dmov_desc *pdesc;
	dmov_sg *pcmd;

	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
	pdesc->len |= ADM_DESC_LAST;

	pdesc = pce_dev->ce_in_dst_desc;
	if (total > QCE_FIFO_SIZE) {
		qce_split_and_insert_dm_desc(pdesc, total, 0,
				&pce_dev->ce_in_dst_desc_index);
		pdesc = pce_dev->ce_in_dst_desc +
				pce_dev->ce_in_dst_desc_index;
		pdesc->len |= ADM_DESC_LAST;
	} else
		pdesc->len = ADM_DESC_LAST | total;

	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in;
	if (ncmd == 1)
		pcmd->cmd |= CMD_LC;
	else {
		dmov_s *pscmd;

		pcmd->cmd &= ~CMD_LC;
		pcmd++;
		pscmd = (dmov_s *)pcmd;
		pscmd->cmd |= CMD_LC;
	}

#ifdef QCE_DEBUG
	dev_info(pce_dev->pdev, "_ce_in_final %d\n",
			pce_dev->ce_in_src_desc_index);
#endif
}

#ifdef QCE_DEBUG
static void _ce_in_dump(struct qce_device *pce_dev)
{
	int i;
	struct dmov_desc *pdesc;

	dev_info(pce_dev->pdev, "_ce_in_dump: src\n");
	for (i = 0; i <= pce_dev->ce_in_src_desc_index; i++) {
		pdesc = pce_dev->ce_in_src_desc + i;
		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
				pdesc->len);
	}
	dev_info(pce_dev->pdev, "_ce_in_dump: dst\n");
	for (i = 0; i <= pce_dev->ce_in_dst_desc_index; i++) {
		pdesc = pce_dev->ce_in_dst_desc + i;
		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
				pdesc->len);
	}
}

static void _ce_out_dump(struct qce_device *pce_dev)
{
	int i;
	struct dmov_desc *pdesc;

	dev_info(pce_dev->pdev, "_ce_out_dump: src\n");
	for (i = 0; i <= pce_dev->ce_out_src_desc_index; i++) {
		pdesc = pce_dev->ce_out_src_desc + i;
		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
				pdesc->len);
	}

	dev_info(pce_dev->pdev, "_ce_out_dump: dst\n");
	for (i = 0; i <= pce_dev->ce_out_dst_desc_index; i++) {
		pdesc = pce_dev->ce_out_dst_desc + i;
		dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
				pdesc->len);
	}
}
#endif

static int _chain_sg_buffer_out(struct qce_device *pce_dev,
		struct scatterlist *sg, unsigned int nbytes)
{
	unsigned int len;
	unsigned int dlen;
	struct dmov_desc *pdesc;

	pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
	/*
	 * Two consecutive chunks may be handled by the old
	 * buffer descriptor.
	 */
	while (nbytes > 0) {
		len = min(nbytes, sg_dma_len(sg));
		dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
		nbytes -= len;
		if (dlen == 0) {
			pdesc->addr = sg_dma_address(sg);
			pdesc->len = len;
			if (pdesc->len > QCE_FIFO_SIZE) {
				if (qce_split_and_insert_dm_desc(pdesc,
						pdesc->len, sg_dma_address(sg),
						&pce_dev->ce_out_dst_desc_index))
					return -EIO;
			}
		} else if (sg_dma_address(sg) == (pdesc->addr + dlen)) {
			pdesc->len = dlen + len;
			if (pdesc->len > QCE_FIFO_SIZE) {
				if (qce_split_and_insert_dm_desc(pdesc,
						pdesc->len, pdesc->addr,
						&pce_dev->ce_out_dst_desc_index))
					return -EIO;
			}
		} else {
			pce_dev->ce_out_dst_desc_index++;
			if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
				return -EIO;
			pdesc++;
			pdesc->len = len;
			pdesc->addr = sg_dma_address(sg);
			if (pdesc->len > QCE_FIFO_SIZE) {
				if (qce_split_and_insert_dm_desc(pdesc,
						pdesc->len, sg_dma_address(sg),
						&pce_dev->ce_out_dst_desc_index))
					return -EIO;
			}
		}
		if (nbytes > 0)
			sg = sg_next(sg);
	}
	return 0;
}

static int _chain_pm_buffer_out(struct qce_device *pce_dev,
		unsigned int pmem, unsigned int nbytes)
{
	unsigned int dlen;
	struct dmov_desc *pdesc;

	pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
	dlen = pdesc->len & ADM_DESC_LENGTH_MASK;

	if (dlen == 0) {
		pdesc->addr = pmem;
		pdesc->len = nbytes;
	} else if (pmem == (pdesc->addr + dlen)) {
		pdesc->len = dlen + nbytes;
	} else {
		pce_dev->ce_out_dst_desc_index++;
		if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
			return -EIO;
		pdesc++;
		pdesc->len = nbytes;
		pdesc->addr = pmem;
	}
	return 0;
}

static void _chain_buffer_out_init(struct qce_device *pce_dev)
{
	struct dmov_desc *pdesc;

	pce_dev->ce_out_dst_desc_index = 0;
	pce_dev->ce_out_src_desc_index = 0;
	pdesc = pce_dev->ce_out_dst_desc;
	pdesc->len = 0;
}

static void _ce_out_final(struct qce_device *pce_dev, int ncmd, unsigned total)
{
	struct dmov_desc *pdesc;
	dmov_sg *pcmd;

	pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
	pdesc->len |= ADM_DESC_LAST;

	pdesc = pce_dev->ce_out_src_desc;
	if (total > QCE_FIFO_SIZE) {
		qce_split_and_insert_dm_desc(pdesc, total, 0,
				&pce_dev->ce_out_src_desc_index);
		pdesc = pce_dev->ce_out_src_desc +
				pce_dev->ce_out_src_desc_index;
		pdesc->len |= ADM_DESC_LAST;
	} else
		pdesc->len = ADM_DESC_LAST | total;

	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out;
	if (ncmd == 1)
		pcmd->cmd |= CMD_LC;
	else {
		dmov_s *pscmd;

		pcmd->cmd &= ~CMD_LC;
		pcmd++;
		pscmd = (dmov_s *)pcmd;
		pscmd->cmd |= CMD_LC;
	}

#ifdef QCE_DEBUG
	dev_info(pce_dev->pdev, "_ce_out_final %d\n",
			pce_dev->ce_out_dst_desc_index);
#endif
}

static void _aead_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
				result);
		pce_dev->chan_ce_in_status = -1;
	} else
		pce_dev->chan_ce_in_status = 0;

	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_aead_complete(pce_dev);
	}
}

static void _aead_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
				result);
		pce_dev->chan_ce_out_status = -1;
	} else
		pce_dev->chan_ce_out_status = 0;

	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_aead_complete(pce_dev);
	}
}

static void _sha_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
				result);
		pce_dev->chan_ce_in_status = -1;
	} else
		pce_dev->chan_ce_in_status = 0;
	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
	_sha_complete(pce_dev);
}

static void _ablk_cipher_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
				result);
		pce_dev->chan_ce_in_status = -1;
	} else
		pce_dev->chan_ce_in_status = 0;

	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_ablk_cipher_complete(pce_dev);
	}
}

static void _ablk_cipher_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
				result);
		pce_dev->chan_ce_out_status = -1;
	} else
		pce_dev->chan_ce_out_status = 0;

	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_ablk_cipher_complete(pce_dev);
	}
}

static void _ablk_cipher_ce_in_call_back_pmem(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
				result);
		pce_dev->chan_ce_in_status = -1;
	} else
		pce_dev->chan_ce_in_status = 0;

	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_ablk_cipher_use_pmem_complete(pce_dev);
	}
}

static void _ablk_cipher_ce_out_call_back_pmem(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
				result);
		pce_dev->chan_ce_out_status = -1;
	} else
		pce_dev->chan_ce_out_status = 0;

	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_ablk_cipher_use_pmem_complete(pce_dev);
	}
}

static int _setup_cmd_template(struct qce_device *pce_dev)
{
	dmov_sg *pcmd;
	dmov_s *pscmd;
	struct dmov_desc *pdesc;
	unsigned char *vaddr;
	int i = 0;

	/* Divide up the coherent memory */
	/* 1. ce_in channel 1st command src descriptors, 128 entries */
	vaddr = pce_dev->coh_vmem;
	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
	pce_dev->ce_in_src_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_in_src_desc = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);

	/* 2. ce_in channel 1st command dst descriptors, 128 entries */
	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
	pce_dev->ce_in_dst_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_in_dst_desc = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);

	/*
	 * 3. ce_in channel command list of one scatter gather command
	 *    and one simple command.
	 */
	pce_dev->cmd_list_ce_in = vaddr;
	pce_dev->phy_cmd_list_ce_in = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(dmov_s) + sizeof(dmov_sg);

	/* 4. authentication result. */
	pce_dev->dig_result = vaddr;
	pce_dev->phy_dig_result = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + SHA256_DIGESTSIZE;

	/*
	 * 5. ce_out channel command list of one scatter gather command
	 *    and one simple command.
	 */
	pce_dev->cmd_list_ce_out = vaddr;
	pce_dev->phy_cmd_list_ce_out = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(dmov_s) + sizeof(dmov_sg);

	/* 6. ce_out channel command src descriptors, 128 entries */
	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
	pce_dev->ce_out_src_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_out_src_desc = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);

	/* 7. ce_out channel command dst descriptors, 128 entries. */
	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
	pce_dev->ce_out_dst_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_out_dst_desc = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);

	/* 8. pad area. */
	pce_dev->ce_pad = vaddr;
	pce_dev->phy_ce_pad = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + ADM_CE_BLOCK_SIZE;

	/* 9. ce_in channel command pointer list. */
	vaddr = (unsigned char *) ALIGN(((unsigned int) vaddr), 16);
	pce_dev->cmd_pointer_list_ce_in = (unsigned int *) vaddr;
	pce_dev->phy_cmd_pointer_list_ce_in = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(unsigned char *);

	/* 10. ce_out channel command pointer list. */
	vaddr = (unsigned char *) ALIGN(((unsigned int) vaddr), 16);
	pce_dev->cmd_pointer_list_ce_out = (unsigned int *) vaddr;
	pce_dev->phy_cmd_pointer_list_ce_out = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(unsigned char *);

	/* 11. throw away area to store by-pass data from ce_out. */
	pce_dev->ce_out_ignore = (unsigned char *) vaddr;
	pce_dev->phy_ce_out_ignore = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	pce_dev->ce_out_ignore_size = (2 * PAGE_SIZE) - (vaddr -
			pce_dev->coh_vmem);	/* at least 1.5 K of space */

	/*
	 * The first command of command list ce_in is for the input of
	 * concurrent operation of encrypt/decrypt or for the input
	 * of authentication.
	 */
	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in;
	/* swap byte and half word, dst crci, scatter gather */
	pcmd->cmd = CMD_DST_SWAP_BYTES | CMD_DST_SWAP_SHORTS |
			CMD_DST_CRCI(pce_dev->crci_in) | CMD_MODE_SG;
	pdesc = pce_dev->ce_in_src_desc;
	pdesc->addr = 0;	/* to be filled in each operation */
	pdesc->len = 0;		/* to be filled in each operation */
	pcmd->src_dscr = (unsigned) pce_dev->phy_ce_in_src_desc;

	pdesc = pce_dev->ce_in_dst_desc;
	for (i = 0; i < QCE_MAX_NUM_DESC; i++) {
		pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase);
		pdesc->len = 0;	/* to be filled in each operation */
		pdesc++;
	}
	pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_in_dst_desc;
	pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) |
			DST_INDEX_SG_CMD(0);
	pcmd++;

	/*
	 * The second command is for the digested data of
	 * hashing operation only. For others, this command is not used.
	 */
	pscmd = (dmov_s *) pcmd;
	/* last command, swap byte, half word, src crci, single */
	pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
			CMD_SRC_CRCI(pce_dev->crci_hash) | CMD_MODE_SINGLE;
	pscmd->src = (unsigned) (CRYPTO_AUTH_IV0_REG + pce_dev->phy_iobase);
	pscmd->len = SHA256_DIGESTSIZE;	/* to be filled. */
	pscmd->dst = (unsigned) pce_dev->phy_dig_result;

	/* setup command pointer list */
	*(pce_dev->cmd_pointer_list_ce_in) = (CMD_PTR_LP | DMOV_CMD_LIST |
			DMOV_CMD_ADDR((unsigned int)
			pce_dev->phy_cmd_list_ce_in));
	pce_dev->chan_ce_in_cmd->user = (void *) pce_dev;
	pce_dev->chan_ce_in_cmd->exec_func = NULL;
	pce_dev->chan_ce_in_cmd->cmdptr = DMOV_CMD_ADDR(
			(unsigned int) pce_dev->phy_cmd_pointer_list_ce_in);

	/*
	 * The first command in the command list ce_out.
	 * It is for encrypt/decrypt output.
	 * If hashing only, ce_out is not used.
	 */
	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out;
	/* swap byte, half word, source crci, scatter gather */
	pcmd->cmd = CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
			CMD_SRC_CRCI(pce_dev->crci_out) | CMD_MODE_SG;

	pdesc = pce_dev->ce_out_src_desc;
	for (i = 0; i < QCE_MAX_NUM_DESC; i++) {
		pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase);
		pdesc->len = 0;	/* to be filled in each operation */
		pdesc++;
	}
	pcmd->src_dscr = (unsigned) pce_dev->phy_ce_out_src_desc;

	pdesc = pce_dev->ce_out_dst_desc;
	pdesc->addr = 0;	/* to be filled in each operation */
	pdesc->len = 0;		/* to be filled in each operation */
	pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_out_dst_desc;
	pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) |
			DST_INDEX_SG_CMD(0);

	pcmd++;
	/*
	 * The second command is for digested data of esp operation.
	 * For ciphering, this command is not used.
	 */
	pscmd = (dmov_s *) pcmd;
	/* last command, swap byte, half word, src crci, single */
	pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
			CMD_SRC_CRCI(pce_dev->crci_hash) | CMD_MODE_SINGLE;
	pscmd->src = (CRYPTO_AUTH_IV0_REG + pce_dev->phy_iobase);
	pscmd->len = SHA1_DIGESTSIZE;	/* we only support hmac(sha1) */
	pscmd->dst = (unsigned) pce_dev->phy_dig_result;

	/* setup command pointer list */
	*(pce_dev->cmd_pointer_list_ce_out) = (CMD_PTR_LP | DMOV_CMD_LIST |
			DMOV_CMD_ADDR((unsigned int)pce_dev->
			phy_cmd_list_ce_out));

	pce_dev->chan_ce_out_cmd->user = pce_dev;
	pce_dev->chan_ce_out_cmd->exec_func = NULL;
	pce_dev->chan_ce_out_cmd->cmdptr = DMOV_CMD_ADDR(
			(unsigned int) pce_dev->phy_cmd_pointer_list_ce_out);

	return 0;
}

static int _qce_start_dma(struct qce_device *pce_dev, bool ce_in, bool ce_out)
{
	if (ce_in)
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IN_PROG;
	else
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;

	if (ce_out)
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IN_PROG;
	else
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;

	if (ce_in)
		msm_dmov_enqueue_cmd(pce_dev->chan_ce_in,
				pce_dev->chan_ce_in_cmd);
	if (ce_out)
		msm_dmov_enqueue_cmd(pce_dev->chan_ce_out,
				pce_dev->chan_ce_out_cmd);

	return 0;
}
1783
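/*
 * OTA completions: f9 is the 3GPP integrity function, so its result is
 * a 32-bit MAC read back from CRYPTO_AUTH_IV0_REG; f8 is the ciphering
 * function, so it only needs its buffers unmapped and status checked.
 */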
static void _f9_complete(struct qce_device *pce_dev)
{
	uint32_t mac_i;
	uint32_t status;

	dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
			pce_dev->ota_size, DMA_TO_DEVICE);

	/* check ce error status */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (status & (1 << CRYPTO_SW_ERR)) {
		pce_dev->err++;
		dev_err(pce_dev->pdev,
			"Qualcomm Crypto Error at 0x%x, status %x\n",
			pce_dev->phy_iobase, status);
		_init_ce_engine(pce_dev);
		pce_dev->qce_cb(pce_dev->areq, NULL, NULL, -ENXIO);
		return;
	}

	mac_i = readl_relaxed(pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
	pce_dev->qce_cb(pce_dev->areq, (void *) mac_i, NULL,
			pce_dev->chan_ce_in_status);
}

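/*
 * Completion of an f8 (ciphering) request: unmap the source and
 * destination buffers (the source is mapped bidirectionally for
 * in-place operation), check the engine error status, and report the
 * combined status of both ADM channels to the client callback.
 */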
static void _f8_complete(struct qce_device *pce_dev)
{
	uint32_t status;

	if (pce_dev->phy_ota_dst != 0)
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst,
				pce_dev->ota_size, DMA_FROM_DEVICE);
	if (pce_dev->phy_ota_src != 0)
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
				pce_dev->ota_size, (pce_dev->phy_ota_dst) ?
				DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	/* check ce error status */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (status & (1 << CRYPTO_SW_ERR)) {
		pce_dev->err++;
		dev_err(pce_dev->pdev,
			"Qualcomm Crypto Error at 0x%x, status %x\n",
			pce_dev->phy_iobase, status);
		_init_ce_engine(pce_dev);
		pce_dev->qce_cb(pce_dev->areq, NULL, NULL, -ENXIO);
		return;
	}

	pce_dev->qce_cb(pce_dev->areq, NULL, NULL,
			pce_dev->chan_ce_in_status |
			pce_dev->chan_ce_out_status);
}

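/*
 * ADM completion callbacks. Each callback records its channel's
 * status; the f8 callbacks complete the request only once both the
 * in and out channels have reported completion.
 */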
static void _f9_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
				result);
		pce_dev->chan_ce_in_status = -1;
	} else
		pce_dev->chan_ce_in_status = 0;
	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
	_f9_complete(pce_dev);
}

static void _f8_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
				result);
		pce_dev->chan_ce_in_status = -1;
	} else
		pce_dev->chan_ce_in_status = 0;

	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_f8_complete(pce_dev);
	}
}

static void _f8_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
				result);
		pce_dev->chan_ce_out_status = -1;
	} else
		pce_dev->chan_ce_out_status = 0;

	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_f8_complete(pce_dev);
	}
}

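/*
 * Program the engine for an f9 integrity operation: load the
 * integrity key and the FRESH/COUNT-I parameters, select UIA1
 * (Kasumi) or UIA2 from req->algorithm, set the direction bit, and
 * issue GO before the DMA transfer is started.
 */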
static int _ce_f9_setup(struct qce_device *pce_dev, struct qce_f9_req *req)
{
	uint32_t cfg;
	uint32_t ikey[OTA_KEY_SIZE/sizeof(uint32_t)];

	_byte_stream_to_net_words(ikey, &req->ikey[0], OTA_KEY_SIZE);
	writel_relaxed(ikey[0], pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
	writel_relaxed(ikey[1], pce_dev->iobase + CRYPTO_AUTH_IV1_REG);
	writel_relaxed(ikey[2], pce_dev->iobase + CRYPTO_AUTH_IV2_REG);
	writel_relaxed(ikey[3], pce_dev->iobase + CRYPTO_AUTH_IV3_REG);
	writel_relaxed(req->last_bits, pce_dev->iobase + CRYPTO_AUTH_IV4_REG);

	writel_relaxed(req->fresh, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG);
	writel_relaxed(req->count_i, pce_dev->iobase +
						CRYPTO_AUTH_BYTECNT1_REG);

	/* write auth_seg_cfg */
	writel_relaxed((uint32_t)req->msize << CRYPTO_AUTH_SEG_SIZE,
			pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);

	/* write seg_cfg */
	cfg = (CRYPTO_AUTH_ALG_F9 << CRYPTO_AUTH_ALG) | (1 << CRYPTO_FIRST) |
			(1 << CRYPTO_LAST);

	if (req->algorithm == QCE_OTA_ALGO_KASUMI)
		cfg |= (CRYPTO_AUTH_SIZE_UIA1 << CRYPTO_AUTH_SIZE);
	else
		cfg |= (CRYPTO_AUTH_SIZE_UIA2 << CRYPTO_AUTH_SIZE);

	if (req->direction == QCE_OTA_DIR_DOWNLINK)
		cfg |= 1 << CRYPTO_F9_DIRECTION;

	writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);

	/* write seg_size */
	writel_relaxed(req->msize, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	/* issue go to crypto */
	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);

	/*
	 * Barrier to ensure previous instructions to the CE
	 * (including GO) finish before the DMA transfer request
	 * is issued.
	 */
	mb();
	return 0;
}

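/*
 * Program the engine for an f8 ciphering operation: select UEA1
 * (Kasumi) or UEA2 from req->algorithm, load the cipher key, COUNT-C,
 * bearer, and packet count, optionally enable key stream generation,
 * and issue GO before the DMA transfer is started.
 */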
static int _ce_f8_setup(struct qce_device *pce_dev, struct qce_f8_req *req,
		bool key_stream_mode, uint16_t npkts, uint16_t cipher_offset,
		uint16_t cipher_size)
{
	uint32_t cfg;
	uint32_t ckey[OTA_KEY_SIZE/sizeof(uint32_t)];

	if ((key_stream_mode && (req->data_len & 0xf || npkts > 1)) ||
			(req->bearer >= QCE_OTA_MAX_BEARER))
		return -EINVAL;

	/* write seg_cfg */
	cfg = (CRYPTO_ENCR_ALG_F8 << CRYPTO_ENCR_ALG) | (1 << CRYPTO_FIRST) |
			(1 << CRYPTO_LAST);
	if (req->algorithm == QCE_OTA_ALGO_KASUMI)
		cfg |= (CRYPTO_ENCR_KEY_SZ_UEA1 << CRYPTO_ENCR_KEY_SZ);
	else
		cfg |= (CRYPTO_ENCR_KEY_SZ_UEA2 << CRYPTO_ENCR_KEY_SZ);
	if (key_stream_mode)
		cfg |= 1 << CRYPTO_F8_KEYSTREAM_ENABLE;
	if (req->direction == QCE_OTA_DIR_DOWNLINK)
		cfg |= 1 << CRYPTO_F8_DIRECTION;
	writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);

	/* write seg_size */
	writel_relaxed(req->data_len, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	/* write 0 to auth_size, auth_offset */
	writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);

	/* write encr_seg_cfg seg_size, seg_offset */
	writel_relaxed((((uint32_t) cipher_size) << CRYPTO_ENCR_SEG_SIZE) |
			(cipher_offset & 0xffff),
			pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);

	/* write keys */
	_byte_stream_to_net_words(ckey, &req->ckey[0], OTA_KEY_SIZE);
	writel_relaxed(ckey[0], pce_dev->iobase + CRYPTO_DES_KEY0_REG);
	writel_relaxed(ckey[1], pce_dev->iobase + CRYPTO_DES_KEY1_REG);
	writel_relaxed(ckey[2], pce_dev->iobase + CRYPTO_DES_KEY2_REG);
	writel_relaxed(ckey[3], pce_dev->iobase + CRYPTO_DES_KEY3_REG);

	/* write cntr0_iv0 for countC */
	writel_relaxed(req->count_c, pce_dev->iobase + CRYPTO_CNTR0_IV0_REG);

	/* write cntr1_iv1 for nPkts and bearer */
	if (npkts == 1)
		npkts = 0;
	writel_relaxed(req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
			npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT,
			pce_dev->iobase + CRYPTO_CNTR1_IV1_REG);

	/* issue go to crypto */
	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);

	/*
	 * Barrier to ensure previous instructions to the CE
	 * (including GO) finish before the DMA transfer request
	 * is issued.
	 */
	mb();
	return 0;
}

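/*
 * Queue an AEAD request. The input chain carries associated data,
 * the cipher IV, then the payload; the output chain skips associated
 * data and IV. Both chains are padded to the 16-byte ADM/CE block
 * size before the two DMA channels are started.
 */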
int qce_aead_req(void *handle, struct qce_req *q_req)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	struct aead_request *areq = (struct aead_request *) q_req->areq;
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	uint32_t ivsize = crypto_aead_ivsize(aead);
	uint32_t totallen;
	uint32_t pad_len;
	uint32_t authsize = crypto_aead_authsize(aead);
	int rc = 0;

	q_req->ivsize = ivsize;
	if (q_req->dir == QCE_ENCRYPT)
		q_req->cryptlen = areq->cryptlen;
	else
		q_req->cryptlen = areq->cryptlen - authsize;

	totallen = q_req->cryptlen + ivsize + areq->assoclen;
	pad_len = ALIGN(totallen, ADM_CE_BLOCK_SIZE) - totallen;

	_chain_buffer_in_init(pce_dev);
	_chain_buffer_out_init(pce_dev);

	pce_dev->assoc_nents = 0;
	pce_dev->phy_iv_in = 0;
	pce_dev->src_nents = 0;
	pce_dev->dst_nents = 0;

	pce_dev->assoc_nents = count_sg(areq->assoc, areq->assoclen);
	dma_map_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
			DMA_TO_DEVICE);
	if (_chain_sg_buffer_in(pce_dev, areq->assoc, areq->assoclen) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* cipher iv for input */
	pce_dev->phy_iv_in = dma_map_single(pce_dev->pdev, q_req->iv,
			ivsize, DMA_TO_DEVICE);
	if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_iv_in, ivsize) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* for output, ignore associated data and cipher iv */
	if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_out_ignore,
				ivsize + areq->assoclen) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* cipher input */
	pce_dev->src_nents = count_sg(areq->src, q_req->cryptlen);
	dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
							DMA_TO_DEVICE);
	if (_chain_sg_buffer_in(pce_dev, areq->src, q_req->cryptlen) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* cipher output */
	if (areq->src != areq->dst) {
		pce_dev->dst_nents = count_sg(areq->dst, q_req->cryptlen);
		dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
				DMA_FROM_DEVICE);
	}
	if (_chain_sg_buffer_out(pce_dev, areq->dst, q_req->cryptlen) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* pad data */
	if (pad_len) {
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
					pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
					pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* finalize the ce_in and ce_out channels command lists */
	_ce_in_final(pce_dev, 1, ALIGN(totallen, ADM_CE_BLOCK_SIZE));
	_ce_out_final(pce_dev, 2, ALIGN(totallen, ADM_CE_BLOCK_SIZE));

	/* set up crypto device */
	rc = _ce_setup(pce_dev, q_req, totallen, ivsize + areq->assoclen);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = q_req->areq;
	pce_dev->qce_cb = q_req->qce_cb;

	pce_dev->chan_ce_in_cmd->complete_func = _aead_ce_in_call_back;
	pce_dev->chan_ce_out_cmd->complete_func = _aead_ce_out_call_back;

	rc = _qce_start_dma(pce_dev, true, true);
	if (rc == 0)
		return 0;
bad:
	if (pce_dev->assoc_nents) {
		dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
				DMA_TO_DEVICE);
	}
	if (pce_dev->phy_iv_in) {
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
				ivsize, DMA_TO_DEVICE);
	}
	if (pce_dev->src_nents) {
		dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
				(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
								DMA_TO_DEVICE);
	}
	if (pce_dev->dst_nents) {
		dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
				DMA_FROM_DEVICE);
	}
	return rc;
}
EXPORT_SYMBOL(qce_aead_req);

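/*
 * Queue a block cipher request. Source and destination scatterlists
 * are chained into the ce_in and ce_out DMA command lists (via PMEM
 * mappings when c_req->use_pmem is set), padded to the 16-byte CE
 * block size, and both DMA channels are started.
 */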
int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
{
	int rc = 0;
	struct qce_device *pce_dev = (struct qce_device *) handle;
	struct ablkcipher_request *areq = (struct ablkcipher_request *)
						c_req->areq;

	uint32_t pad_len = ALIGN(areq->nbytes, ADM_CE_BLOCK_SIZE)
						- areq->nbytes;

	_chain_buffer_in_init(pce_dev);
	_chain_buffer_out_init(pce_dev);

	pce_dev->src_nents = 0;
	pce_dev->dst_nents = 0;
	/* cipher input */
	pce_dev->src_nents = count_sg(areq->src, areq->nbytes);

	if (c_req->use_pmem != 1)
		dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
							DMA_TO_DEVICE);
	else
		dma_map_pmem_sg(&c_req->pmem->src[0], pce_dev->src_nents,
							areq->src);

	if (_chain_sg_buffer_in(pce_dev, areq->src, areq->nbytes) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* cipher output */
	if (areq->src != areq->dst) {
		pce_dev->dst_nents = count_sg(areq->dst, areq->nbytes);
		if (c_req->use_pmem != 1)
			dma_map_sg(pce_dev->pdev, areq->dst,
					pce_dev->dst_nents, DMA_FROM_DEVICE);
		else
			dma_map_pmem_sg(&c_req->pmem->dst[0],
					pce_dev->dst_nents, areq->dst);
	}
	if (_chain_sg_buffer_out(pce_dev, areq->dst, areq->nbytes) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* pad data */
	if (pad_len) {
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
					pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
					pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* finalize the ce_in and ce_out channels command lists */
	_ce_in_final(pce_dev, 1, areq->nbytes + pad_len);
	_ce_out_final(pce_dev, 1, areq->nbytes + pad_len);

#ifdef QCE_DEBUG
	_ce_in_dump(pce_dev);
	_ce_out_dump(pce_dev);
#endif
	/* set up crypto device */
	rc = _ce_setup(pce_dev, c_req, areq->nbytes, 0);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = areq;
	pce_dev->qce_cb = c_req->qce_cb;
	if (c_req->use_pmem == 1) {
		pce_dev->chan_ce_in_cmd->complete_func =
					_ablk_cipher_ce_in_call_back_pmem;
		pce_dev->chan_ce_out_cmd->complete_func =
					_ablk_cipher_ce_out_call_back_pmem;
	} else {
		pce_dev->chan_ce_in_cmd->complete_func =
					_ablk_cipher_ce_in_call_back;
		pce_dev->chan_ce_out_cmd->complete_func =
					_ablk_cipher_ce_out_call_back;
	}
	rc = _qce_start_dma(pce_dev, true, true);

	if (rc == 0)
		return 0;
bad:
	if (c_req->use_pmem != 1) {
		if (pce_dev->dst_nents) {
			dma_unmap_sg(pce_dev->pdev, areq->dst,
					pce_dev->dst_nents, DMA_FROM_DEVICE);
		}
		if (pce_dev->src_nents) {
			dma_unmap_sg(pce_dev->pdev, areq->src,
					pce_dev->src_nents,
					(areq->src == areq->dst) ?
						DMA_BIDIRECTIONAL :
						DMA_TO_DEVICE);
		}
	}
	return rc;
}
EXPORT_SYMBOL(qce_ablk_cipher_req);

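/*
 * Queue a hash request. Only the ce_in channel is used: the source
 * data is chained in and padded to the CE block size, and for this
 * authentication-only case the same channel also carries the digest
 * result back.
 */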
int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	int rc;
	uint32_t pad_len = ALIGN(sreq->size, ADM_CE_BLOCK_SIZE) - sreq->size;
	struct ahash_request *areq = (struct ahash_request *)sreq->areq;

	_chain_buffer_in_init(pce_dev);
	pce_dev->src_nents = count_sg(sreq->src, sreq->size);
	dma_map_sg(pce_dev->pdev, sreq->src, pce_dev->src_nents,
			DMA_TO_DEVICE);

	if (_chain_sg_buffer_in(pce_dev, sreq->src, sreq->size) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	if (pad_len) {
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
					pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}
	_ce_in_final(pce_dev, 2, sreq->size + pad_len);

#ifdef QCE_DEBUG
	_ce_in_dump(pce_dev);
#endif

	rc = _sha_ce_setup(pce_dev, sreq);

	if (rc < 0)
		goto bad;

	pce_dev->areq = areq;
	pce_dev->qce_cb = sreq->qce_cb;
	pce_dev->chan_ce_in_cmd->complete_func = _sha_ce_in_call_back;

	rc = _qce_start_dma(pce_dev, true, false);

	if (rc == 0)
		return 0;
bad:
	if (pce_dev->src_nents) {
		dma_unmap_sg(pce_dev->pdev, sreq->src,
				pce_dev->src_nents, DMA_TO_DEVICE);
	}

	return rc;
}
EXPORT_SYMBOL(qce_process_sha_req);

/*
 * crypto engine open function.
 */
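/*
 * A minimal caller sketch (illustrative only; assumes a valid
 * struct platform_device *pdev from the platform bus probe):
 *
 *	int rc;
 *	void *qce = qce_open(pdev, &rc);
 *
 *	if (qce == NULL)
 *		return rc;
 *	...
 *	qce_close(qce);
 */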
void *qce_open(struct platform_device *pdev, int *rc)
{
	struct qce_device *pce_dev;
	struct resource *resource;
	struct clk *ce_clk;

	pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
	if (!pce_dev) {
		*rc = -ENOMEM;
		dev_err(&pdev->dev, "Cannot allocate memory\n");
		return NULL;
	}
	pce_dev->pdev = &pdev->dev;
	ce_clk = clk_get(pce_dev->pdev, "core_clk");
	if (IS_ERR(ce_clk)) {
		kfree(pce_dev);
		*rc = PTR_ERR(ce_clk);
		return NULL;
	}
	pce_dev->ce_clk = ce_clk;
	*rc = clk_enable(pce_dev->ce_clk);
	if (*rc) {
		clk_put(pce_dev->ce_clk);
		kfree(pce_dev);
		return NULL;
	}

	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!resource) {
		*rc = -ENXIO;
		dev_err(pce_dev->pdev, "Missing MEM resource\n");
		goto err;
	}
	pce_dev->phy_iobase = resource->start;
	pce_dev->iobase = ioremap_nocache(resource->start,
				resource->end - resource->start + 1);
	if (!pce_dev->iobase) {
		*rc = -ENOMEM;
		dev_err(pce_dev->pdev, "Cannot map io memory\n");
		goto err;
	}

	pce_dev->chan_ce_in_cmd = kzalloc(sizeof(struct msm_dmov_cmd),
			GFP_KERNEL);
	pce_dev->chan_ce_out_cmd = kzalloc(sizeof(struct msm_dmov_cmd),
			GFP_KERNEL);
	if (pce_dev->chan_ce_in_cmd == NULL ||
			pce_dev->chan_ce_out_cmd == NULL) {
		dev_err(pce_dev->pdev, "Cannot allocate memory\n");
		*rc = -ENOMEM;
		goto err;
	}

	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
					"crypto_channels");
	if (!resource) {
		*rc = -ENXIO;
		dev_err(pce_dev->pdev, "Missing DMA channel resource\n");
		goto err;
	}
	pce_dev->chan_ce_in = resource->start;
	pce_dev->chan_ce_out = resource->end;
	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
					"crypto_crci_in");
	if (!resource) {
		*rc = -ENXIO;
		dev_err(pce_dev->pdev, "Missing DMA crci in resource\n");
		goto err;
	}
	pce_dev->crci_in = resource->start;
	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
					"crypto_crci_out");
	if (!resource) {
		*rc = -ENXIO;
		dev_err(pce_dev->pdev, "Missing DMA crci out resource\n");
		goto err;
	}
	pce_dev->crci_out = resource->start;
	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
					"crypto_crci_hash");
	if (!resource) {
		*rc = -ENXIO;
		dev_err(pce_dev->pdev, "Missing DMA crci hash resource\n");
		goto err;
	}
	pce_dev->crci_hash = resource->start;
	pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
			2*PAGE_SIZE, &pce_dev->coh_pmem, GFP_KERNEL);

	if (pce_dev->coh_vmem == NULL) {
		*rc = -ENOMEM;
		dev_err(pce_dev->pdev, "Cannot allocate coherent memory.\n");
		goto err;
	}
	_setup_cmd_template(pce_dev);

	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

	if (_init_ce_engine(pce_dev)) {
		*rc = -ENXIO;
		goto err;
	}
	*rc = 0;
	clk_disable(pce_dev->ce_clk);

	pce_dev->err = 0;

	return pce_dev;
err:
	/* the clock is still enabled on every path that reaches here */
	clk_disable(pce_dev->ce_clk);
	qce_close(pce_dev);
	return NULL;
}
EXPORT_SYMBOL(qce_open);

/*
 * crypto engine close function.
 */
int qce_close(void *handle)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;

	if (handle == NULL)
		return -ENODEV;
	if (pce_dev->iobase)
		iounmap(pce_dev->iobase);

	if (pce_dev->coh_vmem)
		dma_free_coherent(pce_dev->pdev, 2*PAGE_SIZE,
				pce_dev->coh_vmem, pce_dev->coh_pmem);
	kfree(pce_dev->chan_ce_in_cmd);
	kfree(pce_dev->chan_ce_out_cmd);

	clk_put(pce_dev->ce_clk);
	kfree(handle);
	return 0;
}
EXPORT_SYMBOL(qce_close);

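/*
 * Report the capabilities of this engine. Most flags are fixed for
 * this hardware; sha1_hmac_20 depends on the probed hmac support,
 * and ota on whether the OTA (f8/f9) feature is present.
 */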
int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;

	if (ce_support == NULL)
		return -EINVAL;

	if (pce_dev->hmac == 1)
		ce_support->sha1_hmac_20 = true;
	else
		ce_support->sha1_hmac_20 = false;
	ce_support->sha1_hmac = false;
	ce_support->sha256_hmac = false;
	ce_support->sha_hmac = false;
	ce_support->cmac = false;
	ce_support->aes_key_192 = true;
	ce_support->aes_xts = false;
	ce_support->aes_ccm = false;
	ce_support->ota = pce_dev->ota;
	return 0;
}
EXPORT_SYMBOL(qce_hw_support);

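/*
 * Queue a single-packet f8 ciphering request. When data_in is NULL
 * the engine runs in key stream mode and only the ce_out channel is
 * used; otherwise the input is chained in as well, and in-place
 * operation (data_in == data_out) maps the buffer bidirectionally.
 */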
int qce_f8_req(void *handle, struct qce_f8_req *req,
		void *cookie, qce_comp_func_ptr_t qce_cb)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	bool key_stream_mode;
	dma_addr_t dst;
	int rc;
	uint32_t pad_len = ALIGN(req->data_len, ADM_CE_BLOCK_SIZE) -
						req->data_len;

	_chain_buffer_in_init(pce_dev);
	_chain_buffer_out_init(pce_dev);

	key_stream_mode = (req->data_in == NULL);

	/* F8 cipher input */
	if (key_stream_mode)
		pce_dev->phy_ota_src = 0;
	else {
		pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev,
				req->data_in, req->data_len,
				(req->data_in == req->data_out) ?
				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src,
				req->data_len) < 0) {
			pce_dev->phy_ota_dst = 0;
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* F8 cipher output */
	if (req->data_in != req->data_out) {
		dst = dma_map_single(pce_dev->pdev, req->data_out,
				req->data_len, DMA_FROM_DEVICE);
		pce_dev->phy_ota_dst = dst;
	} else {
		dst = pce_dev->phy_ota_src;
		pce_dev->phy_ota_dst = 0;
	}
	if (_chain_pm_buffer_out(pce_dev, dst, req->data_len) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	pce_dev->ota_size = req->data_len;

	/* pad data */
	if (pad_len) {
		if (!key_stream_mode && _chain_pm_buffer_in(pce_dev,
				pce_dev->phy_ce_pad, pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
				pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* finalize the ce_in and ce_out channels command lists */
	if (!key_stream_mode)
		_ce_in_final(pce_dev, 1, req->data_len + pad_len);
	_ce_out_final(pce_dev, 1, req->data_len + pad_len);

	/* set up crypto device */
	rc = _ce_f8_setup(pce_dev, req, key_stream_mode, 1, 0, req->data_len);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = cookie;
	pce_dev->qce_cb = qce_cb;

	if (!key_stream_mode)
		pce_dev->chan_ce_in_cmd->complete_func = _f8_ce_in_call_back;

	pce_dev->chan_ce_out_cmd->complete_func = _f8_ce_out_call_back;

	rc = _qce_start_dma(pce_dev, !(key_stream_mode), true);
	if (rc == 0)
		return 0;
bad:
	if (pce_dev->phy_ota_dst != 0)
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst,
				req->data_len, DMA_FROM_DEVICE);
	if (pce_dev->phy_ota_src != 0)
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
				req->data_len,
				(req->data_in == req->data_out) ?
				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	return rc;
}
EXPORT_SYMBOL(qce_f8_req);

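/*
 * Queue an f8 request spanning num_pkt packets of data_len bytes
 * laid out back to back in data_in/data_out. cipher_start and
 * cipher_size are handed to _ce_f8_setup() as the encryption
 * segment offset and size.
 */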
int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq,
			void *cookie, qce_comp_func_ptr_t qce_cb)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	uint16_t num_pkt = mreq->num_pkt;
	uint16_t cipher_start = mreq->cipher_start;
	uint16_t cipher_size = mreq->cipher_size;
	struct qce_f8_req *req = &mreq->qce_f8_req;
	uint32_t total;
	uint32_t pad_len;
	dma_addr_t dst = 0;
	int rc = 0;

	total = num_pkt * req->data_len;
	pad_len = ALIGN(total, ADM_CE_BLOCK_SIZE) - total;

	_chain_buffer_in_init(pce_dev);
	_chain_buffer_out_init(pce_dev);

	/* F8 cipher input */
	pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev,
				req->data_in, total,
				(req->data_in == req->data_out) ?
				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src,
				total) < 0) {
		pce_dev->phy_ota_dst = 0;
		rc = -ENOMEM;
		goto bad;
	}
	/* F8 cipher output */
	if (req->data_in != req->data_out) {
		dst = dma_map_single(pce_dev->pdev, req->data_out, total,
				DMA_FROM_DEVICE);
		pce_dev->phy_ota_dst = dst;
	} else {
		dst = pce_dev->phy_ota_src;
		pce_dev->phy_ota_dst = 0;
	}
	if (_chain_pm_buffer_out(pce_dev, dst, total) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	pce_dev->ota_size = total;

	/* pad data */
	if (pad_len) {
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
					pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
					pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* finalize the ce_in and ce_out channels command lists */
	_ce_in_final(pce_dev, 1, total + pad_len);
	_ce_out_final(pce_dev, 1, total + pad_len);

	/* set up crypto device */
	rc = _ce_f8_setup(pce_dev, req, false, num_pkt, cipher_start,
			cipher_size);
	if (rc)
		goto bad;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = cookie;
	pce_dev->qce_cb = qce_cb;

	pce_dev->chan_ce_in_cmd->complete_func = _f8_ce_in_call_back;
	pce_dev->chan_ce_out_cmd->complete_func = _f8_ce_out_call_back;

	rc = _qce_start_dma(pce_dev, true, true);
	if (rc == 0)
		return 0;
bad:
	if (pce_dev->phy_ota_dst)
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst, total,
				DMA_FROM_DEVICE);
	dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src, total,
				(req->data_in == req->data_out) ?
				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	return rc;
}
EXPORT_SYMBOL(qce_f8_multi_pkt_req);

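/*
 * Queue an f9 integrity request over req->message. Only the ce_in
 * channel is used; the MAC-I result is read back from the engine in
 * _f9_complete() once the channel finishes.
 */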
int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie,
			qce_comp_func_ptr_t qce_cb)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	int rc;
	uint32_t pad_len = ALIGN(req->msize, ADM_CE_BLOCK_SIZE) - req->msize;

	pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev, req->message,
			req->msize, DMA_TO_DEVICE);

	_chain_buffer_in_init(pce_dev);
	rc = _chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src, req->msize);
	if (rc < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	pce_dev->ota_size = req->msize;
	if (pad_len) {
		rc = _chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
				pad_len);
		if (rc < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}
	_ce_in_final(pce_dev, 2, req->msize + pad_len);
	rc = _ce_f9_setup(pce_dev, req);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = cookie;
	pce_dev->qce_cb = qce_cb;

	pce_dev->chan_ce_in_cmd->complete_func = _f9_ce_in_call_back;

	rc = _qce_start_dma(pce_dev, true, false);
	if (rc == 0)
		return 0;
bad:
	dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
			req->msize, DMA_TO_DEVICE);
	return rc;
}
EXPORT_SYMBOL(qce_f9_req);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
MODULE_DESCRIPTION("Crypto Engine driver");
MODULE_VERSION("1.15");