blob: 641b7fddeca809657abeb9c86b2490154a2a48d8 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* Qualcomm Crypto Engine driver.
2 *
3 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 and
7 * only version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14#include <linux/types.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/mod_devicetable.h>
18#include <linux/device.h>
19#include <linux/clk.h>
20#include <linux/err.h>
21#include <linux/dma-mapping.h>
22#include <linux/io.h>
23#include <linux/platform_device.h>
24#include <linux/spinlock.h>
25#include <linux/delay.h>
26#include <linux/crypto.h>
27#include <crypto/hash.h>
28#include <crypto/sha.h>
Mona Hossain5c8ea1f2011-07-28 15:11:29 -070029#include <linux/qcedev.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070030#include <linux/qcota.h>
31#include <mach/dma.h>
32
Mona Hossain5c8ea1f2011-07-28 15:11:29 -070033#include "qce.h"
34#include "qcryptohw_30.h"
35#include "qce_ota.h"
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070036
/* ADM (Application Data Mover) definitions */
#define LI_SG_CMD  (1 << 31)	/* last index in the scatter gather cmd */
#define	SRC_INDEX_SG_CMD(index) ((index & 0x3fff) << 16)
#define DST_INDEX_SG_CMD(index) (index & 0x3fff)
#define ADM_DESC_LAST  (1 << 31)	/* marks the final descriptor in a list */

/* Data xfer between DM and CE in blocks of 16 bytes */
#define ADM_CE_BLOCK_SIZE  16

/*
 * Largest byte count programmed into a single DM descriptor; longer
 * transfers are split (see qce_split_and_insert_dm_desc()).
 * NOTE(review): presumably bounded by the CE FIFO depth — confirm
 * against the hardware documentation.
 */
#define QCE_FIFO_SIZE  0x8000

/* Data xfer between DM and CE in blocks of 64 bytes */
#define ADM_SHA_BLOCK_SIZE  64

/* Low 16 bits of a descriptor's 'len' word carry the byte count */
#define ADM_DESC_LENGTH_MASK 0xffff
#define ADM_DESC_LENGTH(x)  (x & ADM_DESC_LENGTH_MASK)

/* One ADM scatter/gather descriptor: bus address + length/flags word */
struct dmov_desc {
	uint32_t addr;	/* physical (DMA) address of the buffer */
	uint32_t len;	/* byte count in low 16 bits, flags in high bits */
};

/* ADM result value indicating a successfully completed transfer */
#define ADM_STATUS_OK 0x80000002

/* Misc definitions */

/* QCE max number of descriptor in a descriptor list */
#define QCE_MAX_NUM_DESC    128

/* State of DM channel */
enum qce_chan_st_enum {
	QCE_CHAN_STATE_IDLE = 0,	/* no transfer queued */
	QCE_CHAN_STATE_IN_PROG = 1,	/* transfer submitted to ADM */
	QCE_CHAN_STATE_COMP = 2,	/* transfer completed */
	QCE_CHAN_STATE_LAST
};
73
/*
 * CE HW device structure.
 * Each engine has an instance of the structure.
 * Each engine can only handle one crypto operation at one time. It is up to
 * the sw above to ensure single threading of operation on an engine.
 */
struct qce_device {
	struct device *pdev;        /* Handle to platform_device structure */
	unsigned char *coh_vmem;    /* Allocated coherent virtual memory */
	dma_addr_t coh_pmem;	    /* Allocated coherent physical memory */
	void __iomem *iobase;	    /* Virtual io base of CE HW  */
	unsigned int phy_iobase;    /* Physical io base of CE HW    */
	struct clk *ce_clk;	    /* Handle to CE clk */
	unsigned int crci_in;	      /* CRCI for CE DM IN Channel   */
	unsigned int crci_out;	      /* CRCI for CE DM OUT Channel   */
	unsigned int crci_hash;	      /* CRCI for CE HASH   */
	unsigned int chan_ce_in;      /* ADM channel used for CE input
					* and auth result if authentication
					* only operation. */
	unsigned int chan_ce_out;     /* ADM channel used for CE output,
					and icv for esp */


	/* ADM command-pointer lists and their DMA (physical) addresses,
	 * one pair per channel direction. */
	unsigned int *cmd_pointer_list_ce_in;
	dma_addr_t  phy_cmd_pointer_list_ce_in;

	unsigned int *cmd_pointer_list_ce_out;
	dma_addr_t  phy_cmd_pointer_list_ce_out;

	/* ADM command lists (virtual + physical) for each channel */
	unsigned char *cmd_list_ce_in;
	dma_addr_t  phy_cmd_list_ce_in;

	unsigned char *cmd_list_ce_out;
	dma_addr_t  phy_cmd_list_ce_out;

	/* Scatter/gather descriptor arrays (virtual + physical) for the
	 * source and destination sides of each channel. */
	struct dmov_desc *ce_out_src_desc;
	dma_addr_t  phy_ce_out_src_desc;

	struct dmov_desc *ce_out_dst_desc;
	dma_addr_t  phy_ce_out_dst_desc;

	struct dmov_desc *ce_in_src_desc;
	dma_addr_t  phy_ce_in_src_desc;

	struct dmov_desc *ce_in_dst_desc;
	dma_addr_t  phy_ce_in_dst_desc;

	/* Scratch buffer for engine output that the caller does not want */
	unsigned char *ce_out_ignore;
	dma_addr_t phy_ce_out_ignore;

	/* Padding buffer used to round transfers up to block size */
	unsigned char *ce_pad;
	dma_addr_t phy_ce_pad;

	/* msm_dmov command wrappers submitted to the ADM driver */
	struct msm_dmov_cmd  *chan_ce_in_cmd;
	struct msm_dmov_cmd  *chan_ce_out_cmd;

	uint32_t ce_out_ignore_size;	/* bytes routed to ce_out_ignore */

	/* Current fill positions within the descriptor arrays above */
	int ce_out_dst_desc_index;
	int ce_in_dst_desc_index;

	int ce_out_src_desc_index;
	int ce_in_src_desc_index;

	enum qce_chan_st_enum chan_ce_in_state;		/* chan ce_in state */
	enum qce_chan_st_enum chan_ce_out_state;	/* chan ce_out state */

	int chan_ce_in_status;		/* chan ce_in status      */
	int chan_ce_out_status;		/* chan ce_out status */


	/* Digest/auth result buffer handed back through qce_cb */
	unsigned char *dig_result;
	dma_addr_t phy_dig_result;

	/* cached aes key */
	uint32_t aeskey[AES256_KEY_SIZE/sizeof(uint32_t)];

	uint32_t aes_key_size;		/* cached aes key size in bytes */
	int fastaes;			/* ce supports fast aes */
	int hmac;			/* ce support hmac-sha1 */
	bool ota;			/* ce support ota */

	qce_comp_func_ptr_t qce_cb;	/* qce callback function pointer */

	/* Scatterlist entry counts for the request currently in flight */
	int assoc_nents;
	int src_nents;
	int dst_nents;

	void *areq;			/* request currently being processed */
	enum qce_cipher_mode_enum mode;	/* cipher mode of current request */

	/* DMA-mapped single buffers for the current request */
	dma_addr_t phy_iv_in;
	dma_addr_t phy_ota_src;
	dma_addr_t phy_ota_dst;
	unsigned int ota_size;
	int err;			/* count of CRYPTO_SW_ERR occurrences */
};
171
/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
/* Words H0..H4, written to the CRYPTO_AUTH_IVn registers on first block */
static uint32_t  _std_init_vector_sha1[] =   {
	0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
};
/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
/* Words H0..H7 */
static uint32_t _std_init_vector_sha256[] = {
	0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
	0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
};
181
182/* Source: FIPS 197, Figure 7. S-box: substitution values for the byte xy */
/* Source: FIPS 197, Figure 7. S-box: substitution values for the byte xy */
static const uint32_t _s_box[256] = {
	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 };

/*
 * Round-constant bytes Rcon[j] = x^(j-1) in GF(2^8), FIPS 197 Sec 5.2.
 * Index j-1 = (i / Nk) - 1; at most 10 constants are ever consumed
 * (AES-128: 10, AES-192: 8, AES-256: 7).
 */
static const uint32_t _rcon_tab[10] = {
	0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36
};

/* SubWord(): run each byte of a 32-bit word through the AES S-box */
static uint32_t _aes_sub_word(uint32_t w)
{
	return _s_box[w & 0xff] |
		(_s_box[(w >> 8) & 0xff] << 8) |
		(_s_box[(w >> 16) & 0xff] << 16) |
		(_s_box[(w >> 24) & 0xff] << 24);
}

/*
 * AES key expansion, FIPS 197 Sec 5.2 Key Expansion, Figure 11.
 *
 * @keysize:     key length in bits (128, 192 or 256; anything else is
 *               treated as 128)
 * @AES_KEY:     Nk input key words (big-endian packed)
 * @AES_RND_KEY: output array of 4*(Nr+1) round-key words
 */
static void _aes_expand_key_schedule(uint32_t keysize, uint32_t *AES_KEY,
		uint32_t *AES_RND_KEY)
{
	uint32_t nk;		/* key length in words */
	uint32_t nr;		/* number of rounds */
	uint32_t i;
	uint32_t temp;

	switch (keysize) {
	case 192:
		nk = 6;
		nr = 12;
		break;
	case 256:
		nk = 8;
		nr = 14;
		break;
	case 128:
	default:		/* default to AES128 */
		nk = 4;
		nr = 10;
		break;
	}

	/* first Nk round-key words are the cipher key itself */
	for (i = 0; i < nk; i++)
		AES_RND_KEY[i] = AES_KEY[i];

	for (i = nk; i < 4 * (nr + 1); i++) {
		temp = AES_RND_KEY[i - 1];
		if ((i % nk) == 0) {
			/* RotWord, SubWord, then xor the round constant */
			temp = _aes_sub_word((temp << 8) | (temp >> 24));
			temp ^= _rcon_tab[i / nk - 1] << 24;
		} else if ((nk > 6) && ((i % nk) == 4)) {
			/* extra SubWord step unique to AES-256 */
			temp = _aes_sub_word(temp);
		}
		AES_RND_KEY[i] = AES_RND_KEY[i - nk] ^ temp;
	}
}
419
/*
 * Pack a byte stream into big-endian (network order) 32-bit words.
 * A trailing partial word (len not a multiple of 4) is left-justified
 * and zero-padded in its low bytes.
 */
static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned int full = len / sizeof(uint32_t);
	unsigned int rem = len % sizeof(uint32_t);
	unsigned int k;

	while (full--) {
		*iv++ = ((uint32_t)b[0] << 24) |
			((uint32_t)b[1] << 16) |
			((uint32_t)b[2] << 8) |
			(uint32_t)b[3];
		b += sizeof(uint32_t);
	}

	if (rem) {
		uint32_t w = 0;

		for (k = 0; k < rem; k++)
			w |= (uint32_t)b[k] << (24 - 8 * k);
		*iv = w;
	}
}
447
/*
 * Unpack big-endian (network order) 32-bit words into a byte stream.
 * For a trailing partial word only the requested high-order bytes are
 * emitted; exactly 'len' bytes are written.
 */
static void _net_words_to_byte_stream(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned int full = len / sizeof(uint32_t);
	unsigned int rem = len % sizeof(uint32_t);
	unsigned int k;

	while (full--) {
		uint32_t w = *iv++;

		*b++ = (unsigned char)(w >> 24);
		*b++ = (unsigned char)(w >> 16);
		*b++ = (unsigned char)(w >> 8);
		*b++ = (unsigned char)w;
	}

	for (k = 0; k < rem; k++)
		*b++ = (unsigned char)(*iv >> (24 - 8 * k));
}
472
473static int count_sg(struct scatterlist *sg, int nbytes)
474{
475 int i;
476
477 for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
478 nbytes -= sg->length;
479 return i;
480}
481
482static int dma_map_pmem_sg(struct buf_info *pmem, unsigned entries,
483 struct scatterlist *sg)
484{
485 int i = 0;
486 for (i = 0; i < entries; i++) {
487
488 sg->dma_address = (dma_addr_t)pmem->offset;
489 sg++;
490 pmem++;
491 }
492 return 0;
493}
494
495static int _probe_ce_engine(struct qce_device *pce_dev)
496{
497 unsigned int val;
498 unsigned int rev;
499 unsigned int eng_availability; /* engine available functions */
500
501 val = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
502 if ((val & 0xfffffff) != 0x0200004) {
503 dev_err(pce_dev->pdev,
504 "unknown Qualcomm crypto device at 0x%x 0x%x\n",
505 pce_dev->phy_iobase, val);
506 return -EIO;
507 };
508 rev = (val & CRYPTO_CORE_REV_MASK) >> CRYPTO_CORE_REV;
509 if (rev == 0x2) {
510 dev_info(pce_dev->pdev,
511 "Qualcomm Crypto 3e device found at 0x%x\n",
512 pce_dev->phy_iobase);
513 } else if (rev == 0x1) {
514 dev_info(pce_dev->pdev,
515 "Qualcomm Crypto 3 device found at 0x%x\n",
516 pce_dev->phy_iobase);
517 } else if (rev == 0x0) {
518 dev_info(pce_dev->pdev,
519 "Qualcomm Crypto 2 device found at 0x%x\n",
520 pce_dev->phy_iobase);
521 } else {
522 dev_err(pce_dev->pdev,
523 "unknown Qualcomm crypto device at 0x%x\n",
524 pce_dev->phy_iobase);
525 return -EIO;
526 }
527
528 eng_availability = readl_relaxed(pce_dev->iobase +
529 CRYPTO_ENGINES_AVAIL);
530
531 if (((eng_availability & CRYPTO_AES_SEL_MASK) >> CRYPTO_AES_SEL)
532 == CRYPTO_AES_SEL_FAST)
533 pce_dev->fastaes = 1;
534 else
535 pce_dev->fastaes = 0;
536
537 if (eng_availability & (1 << CRYPTO_HMAC_SEL))
538 pce_dev->hmac = 1;
539 else
540 pce_dev->hmac = 0;
541
542 if ((eng_availability & (1 << CRYPTO_F9_SEL)) &&
543 (eng_availability & (1 << CRYPTO_F8_SEL)))
544 pce_dev->ota = true;
545 else
546 pce_dev->ota = false;
547
548 pce_dev->aes_key_size = 0;
549
550 return 0;
551};
552
/*
 * Reset the crypto engine, program the interrupt mask configuration,
 * then probe the core and verify the configuration write took effect.
 * Returns 0 on success, -EIO on probe or read-back failure.
 */
static int _init_ce_engine(struct qce_device *pce_dev)
{
	unsigned int val;

	/* reset qce */
	writel_relaxed(1 << CRYPTO_SW_RST, pce_dev->iobase + CRYPTO_CONFIG_REG);

	/* Ensure previous instruction (write to reset bit)
	 * was completed.
	 */
	mb();
	/* configure ce */
	val = (1 << CRYPTO_MASK_DOUT_INTR) | (1 << CRYPTO_MASK_DIN_INTR) |
			(1 << CRYPTO_MASK_AUTH_DONE_INTR) |
			(1 << CRYPTO_MASK_ERR_INTR);
	writel_relaxed(val, pce_dev->iobase + CRYPTO_CONFIG_REG);

	if (_probe_ce_engine(pce_dev) < 0)
		return -EIO;
	/* read back: the config register must hold what we wrote */
	if (readl_relaxed(pce_dev->iobase + CRYPTO_CONFIG_REG) != val) {
		dev_err(pce_dev->pdev,
				"unknown Qualcomm crypto device at 0x%x\n",
				pce_dev->phy_iobase);
		return -EIO;
	};
	return 0;
};
580
581static int _sha_ce_setup(struct qce_device *pce_dev, struct qce_sha_req *sreq)
582{
583 uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
584 uint32_t diglen;
585 int rc;
586 int i;
587 uint32_t cfg = 0;
588
589 /* if not the last, the size has to be on the block boundary */
590 if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
591 return -EIO;
592
593 switch (sreq->alg) {
594 case QCE_HASH_SHA1:
595 diglen = SHA1_DIGEST_SIZE;
596 break;
597 case QCE_HASH_SHA256:
598 diglen = SHA256_DIGEST_SIZE;
599 break;
600 default:
601 return -EINVAL;
602 }
603 /*
604 * write 20/32 bytes, 5/8 words into auth_iv
605 * for SHA1/SHA256
606 */
607
608 if (sreq->first_blk) {
609 if (sreq->alg == QCE_HASH_SHA1) {
610 for (i = 0; i < 5; i++)
611 auth32[i] = _std_init_vector_sha1[i];
612 } else {
613 for (i = 0; i < 8; i++)
614 auth32[i] = _std_init_vector_sha256[i];
615 }
616 } else
617 _byte_stream_to_net_words(auth32, sreq->digest, diglen);
618
619 rc = clk_enable(pce_dev->ce_clk);
620 if (rc)
621 return rc;
622
623 writel_relaxed(auth32[0], pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
624 writel_relaxed(auth32[1], pce_dev->iobase + CRYPTO_AUTH_IV1_REG);
625 writel_relaxed(auth32[2], pce_dev->iobase + CRYPTO_AUTH_IV2_REG);
626 writel_relaxed(auth32[3], pce_dev->iobase + CRYPTO_AUTH_IV3_REG);
627 writel_relaxed(auth32[4], pce_dev->iobase + CRYPTO_AUTH_IV4_REG);
628
629 if (sreq->alg == QCE_HASH_SHA256) {
630 writel_relaxed(auth32[5], pce_dev->iobase +
631 CRYPTO_AUTH_IV5_REG);
632 writel_relaxed(auth32[6], pce_dev->iobase +
633 CRYPTO_AUTH_IV6_REG);
634 writel_relaxed(auth32[7], pce_dev->iobase +
635 CRYPTO_AUTH_IV7_REG);
636 }
637 /* write auth_bytecnt 0/1, start with 0 */
638 writel_relaxed(sreq->auth_data[0], pce_dev->iobase +
639 CRYPTO_AUTH_BYTECNT0_REG);
640 writel_relaxed(sreq->auth_data[1], pce_dev->iobase +
641 CRYPTO_AUTH_BYTECNT1_REG);
642
643 /* write auth_seg_cfg */
644 writel_relaxed(sreq->size << CRYPTO_AUTH_SEG_SIZE,
645 pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
646
647 /*
648 * write seg_cfg
649 */
650
651 if (sreq->alg == QCE_HASH_SHA1)
652 cfg |= (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE);
653 else
654 cfg = (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE);
655
656 if (sreq->first_blk)
657 cfg |= 1 << CRYPTO_FIRST;
658 if (sreq->last_blk)
659 cfg |= 1 << CRYPTO_LAST;
660 cfg |= CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG;
661 writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);
662
663 /* write seg_size */
664 writel_relaxed(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
665
666 /* issue go to crypto */
667 writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
668 /* Ensure previous instructions (setting the GO register)
669 * was completed before issuing a DMA transfer request
670 */
671 mb();
672
673 return 0;
674}
675
/*
 * Program the crypto engine for a cipher (DES/3DES/AES) or AEAD
 * (cipher + HMAC-SHA1) operation.
 *
 * @pce_dev:  engine instance
 * @q_req:    request describing op type, algorithm, mode, keys and IV
 * @totallen: total segment size programmed into CRYPTO_SEG_SIZE_REG
 * @coffset:  cipher offset within the segment (low 16 bits of encr seg cfg)
 *
 * Enables the CE clock (completion path disables it), writes IVs, keys
 * and segment configuration, then starts the engine via the GO register.
 * AES keys are cached in pce_dev so an unchanged key skips the (slow)
 * round-key reload; an all-zero AES-256 key selects the hardware key.
 * Returns 0 on success or the clk_enable() error code.
 */
static int _ce_setup(struct qce_device *pce_dev, struct qce_req *q_req,
		uint32_t totallen, uint32_t coffset)
{
	uint32_t hmackey[HMAC_KEY_SIZE/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0};
	uint32_t enckey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0, 0, 0, 0};
	uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
			0, 0, 0, 0};
	uint32_t enck_size_in_word = q_req->encklen / sizeof(uint32_t);
	int aes_key_chg;
	int i, rc;
	uint32_t aes_round_key[CRYPTO_AES_RNDKEYS];
	uint32_t cfg;
	uint32_t ivsize = q_req->ivsize;

	rc = clk_enable(pce_dev->ce_clk);
	if (rc)
		return rc;

	cfg = (1 << CRYPTO_FIRST) | (1 << CRYPTO_LAST);
	if (q_req->op == QCE_REQ_AEAD) {

		/* do authentication setup */

		cfg |= (CRYPTO_AUTH_SIZE_HMAC_SHA1 << CRYPTO_AUTH_SIZE)|
				(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG);

		/* write sha1 init vector */
		writel_relaxed(_std_init_vector_sha1[0],
				pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
		writel_relaxed(_std_init_vector_sha1[1],
				pce_dev->iobase + CRYPTO_AUTH_IV1_REG);
		writel_relaxed(_std_init_vector_sha1[2],
				pce_dev->iobase + CRYPTO_AUTH_IV2_REG);
		writel_relaxed(_std_init_vector_sha1[3],
				pce_dev->iobase + CRYPTO_AUTH_IV3_REG);
		writel_relaxed(_std_init_vector_sha1[4],
				pce_dev->iobase + CRYPTO_AUTH_IV4_REG);
		/* write hmac key */
		_byte_stream_to_net_words(hmackey, q_req->authkey,
						q_req->authklen);
		/* the HMAC key occupies auth IV registers 5..9 */
		writel_relaxed(hmackey[0], pce_dev->iobase +
							CRYPTO_AUTH_IV5_REG);
		writel_relaxed(hmackey[1], pce_dev->iobase +
							CRYPTO_AUTH_IV6_REG);
		writel_relaxed(hmackey[2], pce_dev->iobase +
							CRYPTO_AUTH_IV7_REG);
		writel_relaxed(hmackey[3], pce_dev->iobase +
							CRYPTO_AUTH_IV8_REG);
		writel_relaxed(hmackey[4], pce_dev->iobase +
							CRYPTO_AUTH_IV9_REG);
		/* running byte counts start at zero for AEAD */
		writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG);
		writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT1_REG);

		/* write auth_seg_cfg */
		writel_relaxed((totallen << CRYPTO_AUTH_SEG_SIZE) & 0xffff0000,
				pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);

	}

	/* pack the cipher key into big-endian words */
	_byte_stream_to_net_words(enckey32, q_req->enckey, q_req->encklen);

	switch (q_req->mode) {
	case QCE_MODE_ECB:
		cfg |= (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
		break;

	case QCE_MODE_CBC:
		cfg |= (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
		break;

	case QCE_MODE_CTR:
	default:
		cfg |= (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
		break;
	}
	/* remembered so the completion path knows whether to read IV out */
	pce_dev->mode = q_req->mode;

	switch (q_req->alg) {
	case CIPHER_ALG_DES:
		/* ECB needs no IV; other modes use a 64-bit IV */
		if (q_req->mode !=  QCE_MODE_ECB) {
			_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
			writel_relaxed(enciv32[0], pce_dev->iobase +
						CRYPTO_CNTR0_IV0_REG);
			writel_relaxed(enciv32[1], pce_dev->iobase +
						CRYPTO_CNTR1_IV1_REG);
		}
		writel_relaxed(enckey32[0], pce_dev->iobase +
							CRYPTO_DES_KEY0_REG);
		writel_relaxed(enckey32[1], pce_dev->iobase +
							CRYPTO_DES_KEY1_REG);
		cfg |= ((CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ)  |
				(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
		break;

	case CIPHER_ALG_3DES:
		if (q_req->mode !=  QCE_MODE_ECB) {
			_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
			writel_relaxed(enciv32[0], pce_dev->iobase +
						CRYPTO_CNTR0_IV0_REG);
			writel_relaxed(enciv32[1], pce_dev->iobase +
						CRYPTO_CNTR1_IV1_REG);
		}
		/* 3DES uses three 64-bit keys in DES_KEY0..5 */
		writel_relaxed(enckey32[0], pce_dev->iobase +
							CRYPTO_DES_KEY0_REG);
		writel_relaxed(enckey32[1], pce_dev->iobase +
							CRYPTO_DES_KEY1_REG);
		writel_relaxed(enckey32[2], pce_dev->iobase +
							CRYPTO_DES_KEY2_REG);
		writel_relaxed(enckey32[3], pce_dev->iobase +
							CRYPTO_DES_KEY3_REG);
		writel_relaxed(enckey32[4], pce_dev->iobase +
							CRYPTO_DES_KEY4_REG);
		writel_relaxed(enckey32[5], pce_dev->iobase +
							CRYPTO_DES_KEY5_REG);
		cfg |= ((CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ)  |
				(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
		break;

	case CIPHER_ALG_AES:
	default:
		if (q_req->mode !=  QCE_MODE_ECB) {
			_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
			writel_relaxed(enciv32[0], pce_dev->iobase +
						CRYPTO_CNTR0_IV0_REG);
			writel_relaxed(enciv32[1], pce_dev->iobase +
						CRYPTO_CNTR1_IV1_REG);
			writel_relaxed(enciv32[2], pce_dev->iobase +
						CRYPTO_CNTR2_IV2_REG);
			writel_relaxed(enciv32[3], pce_dev->iobase +
						CRYPTO_CNTR3_IV3_REG);
		}
		/* set number of counter bits */
		writel_relaxed(0xffff, pce_dev->iobase + CRYPTO_CNTR_MASK_REG);

		if (q_req->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
			cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
					CRYPTO_ENCR_KEY_SZ);
			cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
		} else {
			switch (q_req->encklen) {
			case AES128_KEY_SIZE:
				cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
						CRYPTO_ENCR_KEY_SZ);
				break;
			case AES192_KEY_SIZE:
				cfg |= (CRYPTO_ENCR_KEY_SZ_AES192 <<
						CRYPTO_ENCR_KEY_SZ);
				break;
			case AES256_KEY_SIZE:
			default:
				cfg |= (CRYPTO_ENCR_KEY_SZ_AES256 <<
						CRYPTO_ENCR_KEY_SZ);

				/* check for null key. If null, use hw key*/
				for (i = 0; i < enck_size_in_word; i++) {
					if (enckey32[i] != 0)
						break;
				}
				if (i == enck_size_in_word)
					cfg |= 1 << CRYPTO_USE_HW_KEY;
				break;
			} /* end of switch (q_req->encklen) */

			cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
			/* compare against the cached key to decide whether
			 * the round keys need to be (re)loaded */
			if (pce_dev->aes_key_size !=  q_req->encklen)
				aes_key_chg = 1;
			else {
				for (i = 0; i < enck_size_in_word; i++) {
					if (enckey32[i] != pce_dev->aeskey[i])
						break;
				}
				aes_key_chg = (i == enck_size_in_word) ? 0 : 1;
			}

			if (aes_key_chg) {
				if (pce_dev->fastaes) {
					/* fast AES: hw expands the key; just
					 * load the raw key words */
					for (i = 0; i < enck_size_in_word;
							i++) {
						writel_relaxed(enckey32[i],
							pce_dev->iobase +
							CRYPTO_AES_RNDKEY0 +
							(i * sizeof(uint32_t)));
					}
				} else {
					/* size in bit */
					_aes_expand_key_schedule(
						q_req->encklen * 8,
						enckey32, aes_round_key);

					for (i = 0; i < CRYPTO_AES_RNDKEYS;
							i++) {
						writel_relaxed(aes_round_key[i],
							pce_dev->iobase +
							CRYPTO_AES_RNDKEY0 +
							(i * sizeof(uint32_t)));
					}
				}

				/* refresh the cached key */
				pce_dev->aes_key_size = q_req->encklen;
				for (i = 0; i < enck_size_in_word; i++)
					pce_dev->aeskey[i] = enckey32[i];
			} /*if (aes_key_chg) { */
		} /* else of if (q_req->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
		break;
	} /* end of switch (q_req->alg) */

	/* for encrypt, hash after cipher; encode bit selects direction */
	if (q_req->dir == QCE_ENCRYPT)
		cfg |= (1 << CRYPTO_AUTH_POS);
	cfg |= ((q_req->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;

	/* write encr seg cfg */
	writel_relaxed((q_req->cryptlen << CRYPTO_ENCR_SEG_SIZE) |
			(coffset & 0xffff),      /* cipher offset */
			pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);

	/* write seg cfg and size */
	writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);
	writel_relaxed(totallen, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	/* issue go to crypto */
	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
	/* Ensure previous instructions (setting the GO register)
	 * was completed before issuing a DMA transfer request
	 */
	mb();
	return 0;
};
905
/*
 * Completion handler for an AEAD request: unmap all DMA buffers, check
 * the engine error status, read the output IV (except in ECB mode), and
 * invoke the client callback with digest, IV and combined channel status.
 * On a hardware error the engine is re-initialized and the callback gets
 * -ENXIO.  Always returns 0.
 */
static int _aead_complete(struct qce_device *pce_dev)
{
	struct aead_request *areq;
	struct crypto_aead *aead;
	uint32_t ivsize;
	uint32_t iv_out[4];
	unsigned char iv[4 * sizeof(uint32_t)];
	uint32_t status;

	areq = (struct aead_request *) pce_dev->areq;
	aead = crypto_aead_reqtfm(areq);
	ivsize = crypto_aead_ivsize(aead);

	/* unmap dst first when src and dst were mapped separately */
	if (areq->src != areq->dst) {
		dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
				DMA_FROM_DEVICE);
	}
	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
							DMA_TO_DEVICE);
	dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
			ivsize, DMA_TO_DEVICE);
	dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
			DMA_TO_DEVICE);

	/* check ce error status */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (status & (1 << CRYPTO_SW_ERR)) {
		pce_dev->err++;
		dev_err(pce_dev->pdev,
			"Qualcomm Crypto Error at 0x%x, status%x\n",
			pce_dev->phy_iobase, status);
		/* recover the engine before reporting failure */
		_init_ce_engine(pce_dev);
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, pce_dev->dig_result, NULL, -ENXIO);
		return 0;
	};

	/* get iv out */
	if (pce_dev->mode == QCE_MODE_ECB) {
		/* ECB produces no output IV */
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, pce_dev->dig_result, NULL,
				pce_dev->chan_ce_in_status |
				pce_dev->chan_ce_out_status);
	} else {

		iv_out[0] = readl_relaxed(pce_dev->iobase +
							CRYPTO_CNTR0_IV0_REG);
		iv_out[1] = readl_relaxed(pce_dev->iobase +
							CRYPTO_CNTR1_IV1_REG);
		iv_out[2] = readl_relaxed(pce_dev->iobase +
							CRYPTO_CNTR2_IV2_REG);
		iv_out[3] = readl_relaxed(pce_dev->iobase +
							CRYPTO_CNTR3_IV3_REG);

		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, pce_dev->dig_result, iv,
				pce_dev->chan_ce_in_status |
				pce_dev->chan_ce_out_status);
	};
	return 0;
};
969
/*
 * Completion handler for a hash request: unmap the source scatterlist,
 * check the engine error status, read the running byte counters and
 * deliver them (with the digest buffer) to the client callback.
 * On a hardware error the engine is re-initialized and the callback
 * gets -ENXIO.
 */
static void _sha_complete(struct qce_device *pce_dev)
{

	struct ahash_request *areq;
	uint32_t auth_data[2];
	uint32_t status;

	areq = (struct ahash_request *) pce_dev->areq;
	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
				DMA_TO_DEVICE);

	/* check ce error status */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (status & (1 << CRYPTO_SW_ERR)) {
		pce_dev->err++;
		dev_err(pce_dev->pdev,
			"Qualcomm Crypto Error at 0x%x, status%x\n",
			pce_dev->phy_iobase, status);
		_init_ce_engine(pce_dev);
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, pce_dev->dig_result, NULL, -ENXIO);
		return;
	};

	/* running byte counts, needed by the caller to continue the hash */
	auth_data[0] = readl_relaxed(pce_dev->iobase +
						CRYPTO_AUTH_BYTECNT0_REG);
	auth_data[1] = readl_relaxed(pce_dev->iobase +
						CRYPTO_AUTH_BYTECNT1_REG);
	/* Ensure previous instruction (retriving byte count information)
	 * was completed before disabling the clk.
	 */
	mb();
	clk_disable(pce_dev->ce_clk);
	pce_dev->qce_cb(areq,  pce_dev->dig_result, (unsigned char *)auth_data,
				pce_dev->chan_ce_in_status);
};
1006
/*
 * Completion handler for an ablkcipher request: unmap src/dst DMA
 * buffers, check the engine error status, read the output IV (except in
 * ECB mode) and invoke the client callback with the combined channel
 * status.  On a hardware error the engine is re-initialized and the
 * callback gets -ENXIO.  Always returns 0.
 */
static int _ablk_cipher_complete(struct qce_device *pce_dev)
{
	struct ablkcipher_request *areq;
	uint32_t iv_out[4];
	unsigned char iv[4 * sizeof(uint32_t)];
	uint32_t status;

	areq = (struct ablkcipher_request *) pce_dev->areq;

	/* dst was mapped separately only for out-of-place operation */
	if (areq->src != areq->dst) {
		dma_unmap_sg(pce_dev->pdev, areq->dst,
			pce_dev->dst_nents, DMA_FROM_DEVICE);
	}
	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
		(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
						DMA_TO_DEVICE);

	/* check ce error status */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (status & (1 << CRYPTO_SW_ERR)) {
		pce_dev->err++;
		dev_err(pce_dev->pdev,
			"Qualcomm Crypto Error at 0x%x, status%x\n",
			pce_dev->phy_iobase, status);
		_init_ce_engine(pce_dev);
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, NULL, NULL, -ENXIO);
		return 0;
	};

	/* get iv out */
	if (pce_dev->mode == QCE_MODE_ECB) {
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status |
					pce_dev->chan_ce_out_status);
	} else {
		iv_out[0] = readl_relaxed(pce_dev->iobase +
							CRYPTO_CNTR0_IV0_REG);
		iv_out[1] = readl_relaxed(pce_dev->iobase +
							CRYPTO_CNTR1_IV1_REG);
		iv_out[2] = readl_relaxed(pce_dev->iobase +
							CRYPTO_CNTR2_IV2_REG);
		iv_out[3] = readl_relaxed(pce_dev->iobase +
							CRYPTO_CNTR3_IV3_REG);

		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status |
					pce_dev->chan_ce_out_status);
	}

	return 0;
};
1060
/*
 * Completion handler for an ablkcipher request whose buffers came from
 * pmem: identical to _ablk_cipher_complete() except no DMA unmapping is
 * performed (pmem buffers were not mapped through the DMA API here).
 * Always returns 0.
 */
static int _ablk_cipher_use_pmem_complete(struct qce_device *pce_dev)
{
	struct ablkcipher_request *areq;
	uint32_t iv_out[4];
	unsigned char iv[4 * sizeof(uint32_t)];
	uint32_t status;

	areq = (struct ablkcipher_request *) pce_dev->areq;

	/* check ce error status */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (status & (1 << CRYPTO_SW_ERR)) {
		pce_dev->err++;
		dev_err(pce_dev->pdev,
			"Qualcomm Crypto Error at 0x%x, status%x\n",
			pce_dev->phy_iobase, status);
		_init_ce_engine(pce_dev);
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, NULL, NULL, -ENXIO);
		return 0;
	};

	/* get iv out */
	if (pce_dev->mode == QCE_MODE_ECB) {
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status |
					pce_dev->chan_ce_out_status);
	} else {
		iv_out[0] = readl_relaxed(pce_dev->iobase +
							CRYPTO_CNTR0_IV0_REG);
		iv_out[1] = readl_relaxed(pce_dev->iobase +
							CRYPTO_CNTR1_IV1_REG);
		iv_out[2] = readl_relaxed(pce_dev->iobase +
							CRYPTO_CNTR2_IV2_REG);
		iv_out[3] = readl_relaxed(pce_dev->iobase +
							CRYPTO_CNTR3_IV3_REG);

		_net_words_to_byte_stream(iv_out, iv, sizeof(iv));
		clk_disable(pce_dev->ce_clk);
		pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status |
					pce_dev->chan_ce_out_status);
	}

	return 0;
};
1106
Mona Hossaind90ea0e2011-08-11 16:51:07 -07001107static int qce_split_and_insert_dm_desc(struct dmov_desc *pdesc,
1108 unsigned int plen, unsigned int paddr, int *index)
1109{
1110 while (plen > QCE_FIFO_SIZE) {
1111 pdesc->len = QCE_FIFO_SIZE;
1112 if (paddr > 0) {
1113 pdesc->addr = paddr;
1114 paddr += QCE_FIFO_SIZE;
1115 }
1116 plen -= pdesc->len;
1117 if (plen > 0) {
1118 *index = (*index) + 1;
1119 if ((*index) >= QCE_MAX_NUM_DESC)
1120 return -ENOMEM;
1121 pdesc++;
1122 }
1123 }
1124 if ((plen > 0) && (plen <= QCE_FIFO_SIZE)) {
1125 pdesc->len = plen;
1126 if (paddr > 0)
1127 pdesc->addr = paddr;
1128 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001129
Mona Hossaind90ea0e2011-08-11 16:51:07 -07001130 return 0;
1131}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001132
1133static int _chain_sg_buffer_in(struct qce_device *pce_dev,
1134 struct scatterlist *sg, unsigned int nbytes)
1135{
1136 unsigned int len;
1137 unsigned int dlen;
1138 struct dmov_desc *pdesc;
1139
Mona Hossaind90ea0e2011-08-11 16:51:07 -07001140 pdesc = pce_dev->ce_in_dst_desc + pce_dev->ce_in_dst_desc_index;
1141 if (nbytes > QCE_FIFO_SIZE)
1142 qce_split_and_insert_dm_desc(pdesc, nbytes, 0,
1143 &pce_dev->ce_in_dst_desc_index);
1144 else
1145 pdesc->len = nbytes;
1146
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001147 pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
1148 /*
1149 * Two consective chunks may be handled by the old
1150 * buffer descriptor.
1151 */
1152 while (nbytes > 0) {
1153 len = min(nbytes, sg_dma_len(sg));
1154 dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
1155 nbytes -= len;
1156 if (dlen == 0) {
1157 pdesc->addr = sg_dma_address(sg);
1158 pdesc->len = len;
Mona Hossaind90ea0e2011-08-11 16:51:07 -07001159 if (pdesc->len > QCE_FIFO_SIZE)
1160 qce_split_and_insert_dm_desc(pdesc, pdesc->len,
1161 sg_dma_address(sg),
1162 &pce_dev->ce_in_src_desc_index);
1163 } else if (sg_dma_address(sg) == (pdesc->addr + dlen)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001164 pdesc->len = dlen + len;
Mona Hossaind90ea0e2011-08-11 16:51:07 -07001165 if (pdesc->len > QCE_FIFO_SIZE)
1166 qce_split_and_insert_dm_desc(pdesc, pdesc->len,
1167 pdesc->addr,
1168 &pce_dev->ce_in_src_desc_index);
1169 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001170 pce_dev->ce_in_src_desc_index++;
1171 if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
1172 return -ENOMEM;
1173 pdesc++;
1174 pdesc->len = len;
1175 pdesc->addr = sg_dma_address(sg);
Mona Hossaind90ea0e2011-08-11 16:51:07 -07001176 if (pdesc->len > QCE_FIFO_SIZE)
1177 qce_split_and_insert_dm_desc(pdesc, pdesc->len,
1178 sg_dma_address(sg),
1179 &pce_dev->ce_in_src_desc_index);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001180 }
1181 if (nbytes > 0)
1182 sg = sg_next(sg);
1183 }
1184 return 0;
1185}
1186
1187static int _chain_pm_buffer_in(struct qce_device *pce_dev,
1188 unsigned int pmem, unsigned int nbytes)
1189{
1190 unsigned int dlen;
1191 struct dmov_desc *pdesc;
1192
1193 pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
1194 dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
1195 if (dlen == 0) {
1196 pdesc->addr = pmem;
1197 pdesc->len = nbytes;
1198 } else if (pmem == (pdesc->addr + dlen)) {
1199 pdesc->len = dlen + nbytes;
1200 } else {
1201 pce_dev->ce_in_src_desc_index++;
1202 if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
1203 return -ENOMEM;
1204 pdesc++;
1205 pdesc->len = nbytes;
1206 pdesc->addr = pmem;
1207 }
Mona Hossaind90ea0e2011-08-11 16:51:07 -07001208 pdesc = pce_dev->ce_in_dst_desc + pce_dev->ce_in_dst_desc_index;
1209 pdesc->len += nbytes;
1210
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001211 return 0;
1212}
1213
1214static void _chain_buffer_in_init(struct qce_device *pce_dev)
1215{
1216 struct dmov_desc *pdesc;
1217
1218 pce_dev->ce_in_src_desc_index = 0;
Mona Hossaind90ea0e2011-08-11 16:51:07 -07001219 pce_dev->ce_in_dst_desc_index = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001220 pdesc = pce_dev->ce_in_src_desc;
1221 pdesc->len = 0;
1222}
1223
/*
 * Finalize the ce_in descriptor lists and command list before DMA start.
 *
 * Marks the last src and dst descriptors with ADM_DESC_LAST, then places
 * the CMD_LC (last command) flag: on the scatter-gather command itself
 * when it is the only command (ncmd == 1), otherwise on the second
 * (simple, hash-result) command of the list.  @total is currently unused.
 */
static void _ce_in_final(struct qce_device *pce_dev, int ncmd, unsigned total)
{
	struct dmov_desc *pdesc;
	dmov_sg *pcmd;

	/* terminate both descriptor chains */
	pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
	pdesc->len |= ADM_DESC_LAST;
	pdesc = pce_dev->ce_in_dst_desc + pce_dev->ce_in_dst_desc_index;
	pdesc->len |= ADM_DESC_LAST;

	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in;
	if (ncmd == 1)
		pcmd->cmd |= CMD_LC;
	else {
		dmov_s *pscmd;

		/* LC moves to the trailing simple command */
		pcmd->cmd &= ~CMD_LC;
		pcmd++;
		pscmd = (dmov_s *)pcmd;
		pscmd->cmd |= CMD_LC;
	}

#ifdef QCE_DEBUG
	dev_info(pce_dev->pdev, "_ce_in_final %d\n",
					pce_dev->ce_in_src_desc_index);
#endif
}
1251
1252#ifdef QCE_DEBUG
1253static void _ce_in_dump(struct qce_device *pce_dev)
1254{
1255 int i;
1256 struct dmov_desc *pdesc;
1257
Mona Hossaind90ea0e2011-08-11 16:51:07 -07001258 dev_info(pce_dev->pdev, "_ce_in_dump: src\n");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001259 for (i = 0; i <= pce_dev->ce_in_src_desc_index; i++) {
1260 pdesc = pce_dev->ce_in_src_desc + i;
1261 dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
1262 pdesc->len);
1263 }
Mona Hossaind90ea0e2011-08-11 16:51:07 -07001264 dev_info(pce_dev->pdev, "_ce_in_dump: dst\n");
1265 for (i = 0; i <= pce_dev->ce_in_dst_desc_index; i++) {
1266 pdesc = pce_dev->ce_in_dst_desc + i;
1267 dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001268 pdesc->len);
Mona Hossaind90ea0e2011-08-11 16:51:07 -07001269 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001270};
1271
1272static void _ce_out_dump(struct qce_device *pce_dev)
1273{
1274 int i;
1275 struct dmov_desc *pdesc;
1276
Mona Hossaind90ea0e2011-08-11 16:51:07 -07001277 dev_info(pce_dev->pdev, "_ce_out_dump: src\n");
1278 for (i = 0; i <= pce_dev->ce_out_src_desc_index; i++) {
1279 pdesc = pce_dev->ce_out_src_desc + i;
1280 dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
1281 pdesc->len);
1282 }
1283
1284 dev_info(pce_dev->pdev, "_ce_out_dump: dst\n");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001285 for (i = 0; i <= pce_dev->ce_out_dst_desc_index; i++) {
1286 pdesc = pce_dev->ce_out_dst_desc + i;
1287 dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
1288 pdesc->len);
1289 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001290};
1291#endif
1292
1293static int _chain_sg_buffer_out(struct qce_device *pce_dev,
1294 struct scatterlist *sg, unsigned int nbytes)
1295{
1296 unsigned int len;
1297 unsigned int dlen;
1298 struct dmov_desc *pdesc;
1299
Mona Hossaind90ea0e2011-08-11 16:51:07 -07001300 pdesc = pce_dev->ce_out_src_desc + pce_dev->ce_out_src_desc_index;
1301 if (nbytes > QCE_FIFO_SIZE)
1302 qce_split_and_insert_dm_desc(pdesc, nbytes, 0,
1303 &pce_dev->ce_out_src_desc_index);
1304 else
1305 pdesc->len = nbytes;
1306
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001307 pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
1308 /*
1309 * Two consective chunks may be handled by the old
1310 * buffer descriptor.
1311 */
1312 while (nbytes > 0) {
1313 len = min(nbytes, sg_dma_len(sg));
1314 dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
1315 nbytes -= len;
1316 if (dlen == 0) {
1317 pdesc->addr = sg_dma_address(sg);
1318 pdesc->len = len;
Mona Hossaind90ea0e2011-08-11 16:51:07 -07001319 if (pdesc->len > QCE_FIFO_SIZE)
1320 qce_split_and_insert_dm_desc(pdesc, pdesc->len,
1321 sg_dma_address(sg),
1322 &pce_dev->ce_out_dst_desc_index);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001323 } else if (sg_dma_address(sg) == (pdesc->addr + dlen)) {
1324 pdesc->len = dlen + len;
Mona Hossaind90ea0e2011-08-11 16:51:07 -07001325 if (pdesc->len > QCE_FIFO_SIZE)
1326 qce_split_and_insert_dm_desc(pdesc, pdesc->len,
1327 pdesc->addr,
1328 &pce_dev->ce_out_dst_desc_index);
1329
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001330 } else {
1331 pce_dev->ce_out_dst_desc_index++;
1332 if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
1333 return -EIO;
1334 pdesc++;
1335 pdesc->len = len;
1336 pdesc->addr = sg_dma_address(sg);
Mona Hossaind90ea0e2011-08-11 16:51:07 -07001337 if (pdesc->len > QCE_FIFO_SIZE)
1338 qce_split_and_insert_dm_desc(pdesc, pdesc->len,
1339 sg_dma_address(sg),
1340 &pce_dev->ce_out_dst_desc_index);
1341
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001342 }
1343 if (nbytes > 0)
1344 sg = sg_next(sg);
1345 }
1346 return 0;
1347}
1348
1349static int _chain_pm_buffer_out(struct qce_device *pce_dev,
1350 unsigned int pmem, unsigned int nbytes)
1351{
1352 unsigned int dlen;
1353 struct dmov_desc *pdesc;
1354
1355 pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
1356 dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
1357
1358 if (dlen == 0) {
1359 pdesc->addr = pmem;
1360 pdesc->len = nbytes;
1361 } else if (pmem == (pdesc->addr + dlen)) {
1362 pdesc->len = dlen + nbytes;
1363 } else {
1364 pce_dev->ce_out_dst_desc_index++;
1365 if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
1366 return -EIO;
1367 pdesc++;
1368 pdesc->len = nbytes;
1369 pdesc->addr = pmem;
1370 }
Mona Hossaind90ea0e2011-08-11 16:51:07 -07001371 pdesc = pce_dev->ce_out_src_desc + pce_dev->ce_out_src_desc_index;
1372 pdesc->len += nbytes;
1373
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001374 return 0;
1375};
1376
1377static void _chain_buffer_out_init(struct qce_device *pce_dev)
1378{
1379 struct dmov_desc *pdesc;
1380
1381 pce_dev->ce_out_dst_desc_index = 0;
Mona Hossaind90ea0e2011-08-11 16:51:07 -07001382 pce_dev->ce_out_src_desc_index = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001383 pdesc = pce_dev->ce_out_dst_desc;
1384 pdesc->len = 0;
1385};
1386
/*
 * Finalize the ce_out descriptor lists and command list before DMA start.
 *
 * Marks the last dst and src descriptors with ADM_DESC_LAST, then places
 * the CMD_LC (last command) flag: on the scatter-gather command itself
 * when it is the only command (ncmd == 1), otherwise on the second
 * (simple, digest) command of the list.  @total is currently unused.
 */
static void _ce_out_final(struct qce_device *pce_dev, int ncmd, unsigned total)
{
	struct dmov_desc *pdesc;
	dmov_sg *pcmd;

	/* terminate both descriptor chains */
	pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
	pdesc->len |= ADM_DESC_LAST;
	pdesc = pce_dev->ce_out_src_desc + pce_dev->ce_out_src_desc_index;
	pdesc->len |= ADM_DESC_LAST;
	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out;
	if (ncmd == 1)
		pcmd->cmd |= CMD_LC;
	else {
		dmov_s *pscmd;

		/* LC moves to the trailing simple command */
		pcmd->cmd &= ~CMD_LC;
		pcmd++;
		pscmd = (dmov_s *)pcmd;
		pscmd->cmd |= CMD_LC;
	}
#ifdef QCE_DEBUG
	dev_info(pce_dev->pdev, "_ce_out_final %d\n",
			pce_dev->ce_out_dst_desc_index);
#endif

};
1413
1414static void _aead_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
1415 unsigned int result, struct msm_dmov_errdata *err)
1416{
1417 struct qce_device *pce_dev;
1418
1419 pce_dev = (struct qce_device *) cmd_ptr->user;
1420 if (result != ADM_STATUS_OK) {
1421 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1422 result);
1423 pce_dev->chan_ce_in_status = -1;
1424 } else
1425 pce_dev->chan_ce_in_status = 0;
1426
1427 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
1428 if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
1429 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1430 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1431
1432 /* done */
1433 _aead_complete(pce_dev);
1434 }
1435};
1436
1437static void _aead_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
1438 unsigned int result, struct msm_dmov_errdata *err)
1439{
1440 struct qce_device *pce_dev;
1441
1442 pce_dev = (struct qce_device *) cmd_ptr->user;
1443 if (result != ADM_STATUS_OK) {
1444 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1445 result);
1446 pce_dev->chan_ce_out_status = -1;
1447 } else {
1448 pce_dev->chan_ce_out_status = 0;
1449 };
1450
1451 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
1452 if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
1453 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1454 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1455
1456 /* done */
1457 _aead_complete(pce_dev);
1458 }
1459
1460};
1461
1462static void _sha_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
1463 unsigned int result, struct msm_dmov_errdata *err)
1464{
1465 struct qce_device *pce_dev;
1466
1467 pce_dev = (struct qce_device *) cmd_ptr->user;
1468 if (result != ADM_STATUS_OK) {
1469 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1470 result);
1471 pce_dev->chan_ce_in_status = -1;
1472 } else
1473 pce_dev->chan_ce_in_status = 0;
1474 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1475 _sha_complete(pce_dev);
1476};
1477
1478static void _ablk_cipher_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
1479 unsigned int result, struct msm_dmov_errdata *err)
1480{
1481 struct qce_device *pce_dev;
1482
1483 pce_dev = (struct qce_device *) cmd_ptr->user;
1484 if (result != ADM_STATUS_OK) {
1485 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1486 result);
1487 pce_dev->chan_ce_in_status = -1;
1488 } else
1489 pce_dev->chan_ce_in_status = 0;
1490
1491 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
1492 if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
1493 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1494 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1495
1496 /* done */
1497 _ablk_cipher_complete(pce_dev);
1498 }
1499};
1500
1501static void _ablk_cipher_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
1502 unsigned int result, struct msm_dmov_errdata *err)
1503{
1504 struct qce_device *pce_dev;
1505
1506 pce_dev = (struct qce_device *) cmd_ptr->user;
1507 if (result != ADM_STATUS_OK) {
1508 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1509 result);
1510 pce_dev->chan_ce_out_status = -1;
1511 } else {
1512 pce_dev->chan_ce_out_status = 0;
1513 };
1514
1515 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
1516 if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
1517 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1518 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1519
1520 /* done */
1521 _ablk_cipher_complete(pce_dev);
1522 }
1523};
1524
1525
1526static void _ablk_cipher_ce_in_call_back_pmem(struct msm_dmov_cmd *cmd_ptr,
1527 unsigned int result, struct msm_dmov_errdata *err)
1528{
1529 struct qce_device *pce_dev;
1530
1531 pce_dev = (struct qce_device *) cmd_ptr->user;
1532 if (result != ADM_STATUS_OK) {
1533 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1534 result);
1535 pce_dev->chan_ce_in_status = -1;
1536 } else
1537 pce_dev->chan_ce_in_status = 0;
1538
1539 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
1540 if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
1541 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1542 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1543
1544 /* done */
1545 _ablk_cipher_use_pmem_complete(pce_dev);
1546 }
1547};
1548
1549static void _ablk_cipher_ce_out_call_back_pmem(struct msm_dmov_cmd *cmd_ptr,
1550 unsigned int result, struct msm_dmov_errdata *err)
1551{
1552 struct qce_device *pce_dev;
1553
1554 pce_dev = (struct qce_device *) cmd_ptr->user;
1555 if (result != ADM_STATUS_OK) {
1556 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1557 result);
1558 pce_dev->chan_ce_out_status = -1;
1559 } else {
1560 pce_dev->chan_ce_out_status = 0;
1561 };
1562
1563 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
1564 if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
1565 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1566 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1567
1568 /* done */
1569 _ablk_cipher_use_pmem_complete(pce_dev);
1570 }
1571};
1572
/*
 * Carve up the coherent DMA memory (coh_vmem/coh_pmem, 2 * PAGE_SIZE — see
 * ce_out_ignore_size below) into the fixed regions used by the driver, and
 * pre-program the ADM command templates for the ce_in and ce_out channels.
 *
 * Each channel gets: a descriptor table (QCE_MAX_NUM_DESC entries per
 * side), a command list of one scatter-gather command plus one simple
 * command (the simple command moves the digest), and a one-entry command
 * pointer list.  The FIFO-shadow side of each channel is pre-pointed at
 * the CE data shadow register (CRYPTO_DATA_SHADOW0); lengths are filled in
 * per operation.  Always returns 0.
 */
static int _setup_cmd_template(struct qce_device *pce_dev)
{
	dmov_sg *pcmd;
	dmov_s *pscmd;
	struct dmov_desc *pdesc;
	unsigned char *vaddr;
	int i = 0;

	/* Divide up the coherent memory */
	/* 1. ce_in channel command src descriptors, QCE_MAX_NUM_DESC entries */
	vaddr = pce_dev->coh_vmem;
	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
	pce_dev->ce_in_src_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_in_src_desc = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);

	/* 2. ce_in channel command dst descriptors, QCE_MAX_NUM_DESC entries */
	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
	pce_dev->ce_in_dst_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_in_dst_desc = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);

	/*
	 * 3. ce_in channel command list of one scatter gather command
	 * and one simple command.
	 */
	pce_dev->cmd_list_ce_in = vaddr;
	pce_dev->phy_cmd_list_ce_in = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(dmov_s) + sizeof(dmov_sg);

	/* 4. authentication result. */
	pce_dev->dig_result = vaddr;
	pce_dev->phy_dig_result = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + SHA256_DIGESTSIZE;

	/*
	 * 5. ce_out channel command list of one scatter gather command
	 * and one simple command.
	 */
	pce_dev->cmd_list_ce_out = vaddr;
	pce_dev->phy_cmd_list_ce_out = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(dmov_s) + sizeof(dmov_sg);

	/* 6. ce_out channel command src descriptors, QCE_MAX_NUM_DESC entries */
	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
	pce_dev->ce_out_src_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_out_src_desc = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);

	/* 7. ce_out channel command dst descriptors, QCE_MAX_NUM_DESC entries */
	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
	pce_dev->ce_out_dst_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_out_dst_desc = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);

	/* 8. pad area, used to round DMA transfers up to ADM block size. */
	pce_dev->ce_pad = vaddr;
	pce_dev->phy_ce_pad = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + ADM_CE_BLOCK_SIZE;

	/* 9. ce_in channel command pointer list. */
	vaddr = (unsigned char *) ALIGN(((unsigned int) vaddr), 16);
	pce_dev->cmd_pointer_list_ce_in = (unsigned int *) vaddr;
	pce_dev->phy_cmd_pointer_list_ce_in = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(unsigned char *);

	/* 10. ce_out channel command pointer list. */
	vaddr = (unsigned char *) ALIGN(((unsigned int) vaddr), 16);
	pce_dev->cmd_pointer_list_ce_out = (unsigned int *) vaddr;
	pce_dev->phy_cmd_pointer_list_ce_out = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(unsigned char *);

	/* 11. throw away area to store by-pass data from ce_out. */
	pce_dev->ce_out_ignore = (unsigned char *) vaddr;
	pce_dev->phy_ce_out_ignore = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	pce_dev->ce_out_ignore_size = (2 * PAGE_SIZE) - (vaddr -
			pce_dev->coh_vmem); /* at least 1.5 K of space */
	/*
	 * The first command of command list ce_in is for the input of
	 * concurrent operation of encrypt/decrypt or for the input
	 * of authentication.
	 */
	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in;
	/* swap byte and half word , dst crci , scatter gather */
	pcmd->cmd = CMD_DST_SWAP_BYTES | CMD_DST_SWAP_SHORTS |
			CMD_DST_CRCI(pce_dev->crci_in) | CMD_MODE_SG;
	pdesc = pce_dev->ce_in_src_desc;
	pdesc->addr = 0; /* to be filled in each operation */
	pdesc->len = 0; /* to be filled in each operation */
	pcmd->src_dscr = (unsigned) pce_dev->phy_ce_in_src_desc;

	/* all ce_in dst descriptors point at the CE input FIFO shadow */
	pdesc = pce_dev->ce_in_dst_desc;
	for (i = 0; i < QCE_MAX_NUM_DESC; i++) {
		pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase);
		pdesc->len = 0; /* to be filled in each operation */
		pdesc++;
	}
	pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_in_dst_desc;
	pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) |
						DST_INDEX_SG_CMD(0);
	pcmd++;
	/*
	 * The second command is for the digested data of
	 * hashing operation only. For others, this command is not used.
	 */
	pscmd = (dmov_s *) pcmd;
	/* last command, swap byte, half word, src crci, single */
	pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
			CMD_SRC_CRCI(pce_dev->crci_hash) | CMD_MODE_SINGLE;
	pscmd->src = (unsigned) (CRYPTO_AUTH_IV0_REG + pce_dev->phy_iobase);
	pscmd->len = SHA256_DIGESTSIZE; /* to be filled. */
	pscmd->dst = (unsigned) pce_dev->phy_dig_result;
	/* setup command pointer list */
	*(pce_dev->cmd_pointer_list_ce_in) = (CMD_PTR_LP | DMOV_CMD_LIST |
			DMOV_CMD_ADDR((unsigned int)
					pce_dev->phy_cmd_list_ce_in));
	pce_dev->chan_ce_in_cmd->user = (void *) pce_dev;
	pce_dev->chan_ce_in_cmd->exec_func = NULL;
	pce_dev->chan_ce_in_cmd->cmdptr = DMOV_CMD_ADDR(
			(unsigned int) pce_dev->phy_cmd_pointer_list_ce_in);
	pce_dev->chan_ce_in_cmd->crci_mask = msm_dmov_build_crci_mask(2,
			pce_dev->crci_in, pce_dev->crci_hash);
	/*
	 * The first command in the command list ce_out.
	 * It is for encry/decryp output.
	 * If hashing only, ce_out is not used.
	 */
	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out;
	/* swap byte, half word, source crci, scatter gather */
	pcmd->cmd = CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
			CMD_SRC_CRCI(pce_dev->crci_out) | CMD_MODE_SG;

	/* all ce_out src descriptors point at the CE output FIFO shadow */
	pdesc = pce_dev->ce_out_src_desc;
	for (i = 0; i < QCE_MAX_NUM_DESC; i++) {
		pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase);
		pdesc->len = 0; /* to be filled in each operation */
		pdesc++;
	}
	pcmd->src_dscr = (unsigned) pce_dev->phy_ce_out_src_desc;

	pdesc = pce_dev->ce_out_dst_desc;
	pdesc->addr = 0; /* to be filled in each operation */
	pdesc->len = 0; /* to be filled in each operation */
	pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_out_dst_desc;
	pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) |
						DST_INDEX_SG_CMD(0);

	pcmd++;
	/*
	 * The second command is for digested data of esp operation.
	 * For ciphering, this command is not used.
	 */
	pscmd = (dmov_s *) pcmd;
	/* last command, swap byte, half word, src crci, single */
	pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
			CMD_SRC_CRCI(pce_dev->crci_hash) | CMD_MODE_SINGLE;
	pscmd->src = (CRYPTO_AUTH_IV0_REG + pce_dev->phy_iobase);
	pscmd->len = SHA1_DIGESTSIZE; /* we only support hmac(sha1) */
	pscmd->dst = (unsigned) pce_dev->phy_dig_result;
	/* setup command pointer list */
	*(pce_dev->cmd_pointer_list_ce_out) = (CMD_PTR_LP | DMOV_CMD_LIST |
			DMOV_CMD_ADDR((unsigned int)pce_dev->
						phy_cmd_list_ce_out));

	pce_dev->chan_ce_out_cmd->user = pce_dev;
	pce_dev->chan_ce_out_cmd->exec_func = NULL;
	pce_dev->chan_ce_out_cmd->cmdptr = DMOV_CMD_ADDR(
			(unsigned int) pce_dev->phy_cmd_pointer_list_ce_out);
	pce_dev->chan_ce_out_cmd->crci_mask = msm_dmov_build_crci_mask(2,
			pce_dev->crci_out, pce_dev->crci_hash);


	return 0;
};
1758
1759static int _qce_start_dma(struct qce_device *pce_dev, bool ce_in, bool ce_out)
1760{
1761
1762 if (ce_in)
1763 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IN_PROG;
1764 else
1765 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
1766
1767 if (ce_out)
1768 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IN_PROG;
1769 else
1770 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
1771
1772 if (ce_in)
1773 msm_dmov_enqueue_cmd(pce_dev->chan_ce_in,
1774 pce_dev->chan_ce_in_cmd);
1775 if (ce_out)
1776 msm_dmov_enqueue_cmd(pce_dev->chan_ce_out,
1777 pce_dev->chan_ce_out_cmd);
1778
1779 return 0;
1780};
1781
/*
 * Completion handler for an OTA f9 (integrity/MAC) request.
 *
 * Unmaps the source buffer and checks the engine status register; on a
 * software error the engine is re-initialized and the client callback is
 * invoked with -ENXIO.  Otherwise the computed MAC-I word is read from
 * CRYPTO_AUTH_IV0_REG and the 32-bit value itself is passed to the client
 * callback cast into the pointer argument (not a pointer to memory).
 */
static void _f9_complete(struct qce_device *pce_dev)
{
	uint32_t mac_i;
	uint32_t status;

	dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
			pce_dev->ota_size, DMA_TO_DEVICE);

	/* check ce error status */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (status & (1 << CRYPTO_SW_ERR)) {
		pce_dev->err++;
		dev_err(pce_dev->pdev,
			"Qualcomm Crypto Error at 0x%x, status%x\n",
			pce_dev->phy_iobase, status);
		/* engine is wedged; reset it before reporting failure */
		_init_ce_engine(pce_dev);
		pce_dev->qce_cb(pce_dev->areq, NULL, NULL, -ENXIO);
		return;
	};

	mac_i = readl_relaxed(pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
	pce_dev->qce_cb(pce_dev->areq, (void *) mac_i, NULL,
				pce_dev->chan_ce_in_status);
};
1806
/*
 * Completion handler for an OTA f8 (ciphering) request.
 *
 * Unmaps the destination buffer (if a separate one was mapped) and the
 * source buffer; the source was mapped bidirectionally when ciphering in
 * place (phy_ota_dst == 0).  Then checks the engine status register — on a
 * software error the engine is re-initialized and the client callback is
 * invoked with -ENXIO — and otherwise reports the combined in/out DMA
 * channel status to the client callback.
 */
static void _f8_complete(struct qce_device *pce_dev)
{
	uint32_t status;

	if (pce_dev->phy_ota_dst != 0)
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst,
				pce_dev->ota_size, DMA_FROM_DEVICE);
	if (pce_dev->phy_ota_src != 0)
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
				pce_dev->ota_size, (pce_dev->phy_ota_dst) ?
				DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	/* check ce error status */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (status & (1 << CRYPTO_SW_ERR)) {
		pce_dev->err++;
		dev_err(pce_dev->pdev,
			"Qualcomm Crypto Error at 0x%x, status%x\n",
			pce_dev->phy_iobase, status);
		/* engine is wedged; reset it before reporting failure */
		_init_ce_engine(pce_dev);
		pce_dev->qce_cb(pce_dev->areq, NULL, NULL, -ENXIO);
		return;
	};

	pce_dev->qce_cb(pce_dev->areq, NULL, NULL,
			pce_dev->chan_ce_in_status |
			pce_dev->chan_ce_out_status);
};
1835
1836
1837static void _f9_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
1838 unsigned int result, struct msm_dmov_errdata *err)
1839{
1840 struct qce_device *pce_dev;
1841
1842 pce_dev = (struct qce_device *) cmd_ptr->user;
1843 if (result != ADM_STATUS_OK) {
1844 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1845 result);
1846 pce_dev->chan_ce_in_status = -1;
1847 } else
1848 pce_dev->chan_ce_in_status = 0;
1849 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1850 _f9_complete(pce_dev);
1851};
1852
1853static void _f8_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
1854 unsigned int result, struct msm_dmov_errdata *err)
1855{
1856 struct qce_device *pce_dev;
1857
1858 pce_dev = (struct qce_device *) cmd_ptr->user;
1859 if (result != ADM_STATUS_OK) {
1860 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1861 result);
1862 pce_dev->chan_ce_in_status = -1;
1863 } else
1864 pce_dev->chan_ce_in_status = 0;
1865
1866 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
1867 if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
1868 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1869 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1870
1871 /* done */
1872 _f8_complete(pce_dev);
1873 }
1874};
1875
1876static void _f8_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
1877 unsigned int result, struct msm_dmov_errdata *err)
1878{
1879 struct qce_device *pce_dev;
1880
1881 pce_dev = (struct qce_device *) cmd_ptr->user;
1882 if (result != ADM_STATUS_OK) {
1883 dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
1884 result);
1885 pce_dev->chan_ce_out_status = -1;
1886 } else {
1887 pce_dev->chan_ce_out_status = 0;
1888 };
1889
1890 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
1891 if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
1892 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
1893 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
1894
1895 /* done */
1896 _f8_complete(pce_dev);
1897 }
1898};
1899
/*
 * Program the crypto engine for an OTA f9 (integrity) operation and start
 * it.
 *
 * Writes the integrity key into the AUTH_IV registers, the FRESH and
 * COUNT-I values into the byte-count registers, configures the f9
 * algorithm (Kasumi/UIA1 or SNOW3G/UIA2), direction and message size,
 * then issues GO.  A full memory barrier at the end ensures all register
 * writes (including GO) reach the engine before the caller queues the
 * DMA transfer.  Always returns 0.
 */
static int _ce_f9_setup(struct qce_device *pce_dev, struct qce_f9_req * req)
{
	uint32_t cfg;
	uint32_t ikey[OTA_KEY_SIZE/sizeof(uint32_t)];

	/* integrity key goes into the AUTH IV registers */
	_byte_stream_to_net_words(ikey, &req->ikey[0], OTA_KEY_SIZE);
	writel_relaxed(ikey[0], pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
	writel_relaxed(ikey[1], pce_dev->iobase + CRYPTO_AUTH_IV1_REG);
	writel_relaxed(ikey[2], pce_dev->iobase + CRYPTO_AUTH_IV2_REG);
	writel_relaxed(ikey[3], pce_dev->iobase + CRYPTO_AUTH_IV3_REG);
	writel_relaxed(req->last_bits, pce_dev->iobase + CRYPTO_AUTH_IV4_REG);

	writel_relaxed(req->fresh, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG);
	writel_relaxed(req->count_i, pce_dev->iobase +
						CRYPTO_AUTH_BYTECNT1_REG);

	/* write auth_seg_cfg */
	writel_relaxed((uint32_t)req->msize << CRYPTO_AUTH_SEG_SIZE,
			pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);

	/* write seg_cfg */
	cfg = (CRYPTO_AUTH_ALG_F9 << CRYPTO_AUTH_ALG) | (1 << CRYPTO_FIRST) |
			(1 << CRYPTO_LAST);

	if (req->algorithm == QCE_OTA_ALGO_KASUMI)
		cfg |= (CRYPTO_AUTH_SIZE_UIA1 << CRYPTO_AUTH_SIZE);
	else
		cfg |= (CRYPTO_AUTH_SIZE_UIA2 << CRYPTO_AUTH_SIZE) ;

	if (req->direction == QCE_OTA_DIR_DOWNLINK)
		cfg |= 1 << CRYPTO_F9_DIRECTION;

	writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);

	/* write seg_size */
	writel_relaxed(req->msize, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	/* issue go to crypto */
	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);

	/*
	 * barrier to ensure previous instructions
	 * (including GO) to CE finish before issue DMA transfer
	 * request.
	 */
	mb();
	return 0;
};
1948
/*
 * Program the crypto engine for an OTA f8 (ciphering) operation and start
 * it.
 *
 * Validates the request (key-stream mode requires a 16-byte-multiple
 * length and a single packet; bearer must be in range), configures the f8
 * algorithm (Kasumi/UEA1 or SNOW3G/UEA2), direction and segment layout,
 * loads the cipher key into the (shared) DES key registers and COUNT-C /
 * bearer / packet count into the counter registers, then issues GO.  A
 * full memory barrier at the end ensures all register writes (including
 * GO) reach the engine before the caller queues the DMA transfer.
 *
 * Returns 0 on success, -EINVAL on an invalid request.
 */
static int _ce_f8_setup(struct qce_device *pce_dev, struct qce_f8_req *req,
		bool key_stream_mode, uint16_t npkts, uint16_t cipher_offset,
		uint16_t cipher_size)
{
	uint32_t cfg;
	uint32_t ckey[OTA_KEY_SIZE/sizeof(uint32_t)];

	if ((key_stream_mode && (req->data_len & 0xf || npkts > 1)) ||
			(req->bearer >= QCE_OTA_MAX_BEARER))
		return -EINVAL;

	/* write seg_cfg */
	cfg = (CRYPTO_ENCR_ALG_F8 << CRYPTO_ENCR_ALG) | (1 << CRYPTO_FIRST) |
			(1 << CRYPTO_LAST);
	if (req->algorithm == QCE_OTA_ALGO_KASUMI)
		cfg |= (CRYPTO_ENCR_KEY_SZ_UEA1 << CRYPTO_ENCR_KEY_SZ);
	else
		cfg |= (CRYPTO_ENCR_KEY_SZ_UEA2 << CRYPTO_ENCR_KEY_SZ) ;
	if (key_stream_mode)
		cfg |= 1 << CRYPTO_F8_KEYSTREAM_ENABLE;
	if (req->direction == QCE_OTA_DIR_DOWNLINK)
		cfg |= 1 << CRYPTO_F8_DIRECTION;
	writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);

	/* write seg_size */
	writel_relaxed(req->data_len, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	/* write 0 to auth_size, auth_offset */
	writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);

	/* write encr_seg_cfg seg_size, seg_offset */
	writel_relaxed((((uint32_t) cipher_size) << CRYPTO_ENCR_SEG_SIZE) |
			(cipher_offset & 0xffff),
			pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);

	/* write keys: the cipher key is loaded via the DES key registers */
	_byte_stream_to_net_words(ckey, &req->ckey[0], OTA_KEY_SIZE);
	writel_relaxed(ckey[0], pce_dev->iobase + CRYPTO_DES_KEY0_REG);
	writel_relaxed(ckey[1], pce_dev->iobase + CRYPTO_DES_KEY1_REG);
	writel_relaxed(ckey[2], pce_dev->iobase + CRYPTO_DES_KEY2_REG);
	writel_relaxed(ckey[3], pce_dev->iobase + CRYPTO_DES_KEY3_REG);

	/* write cntr0_iv0 for countC */
	writel_relaxed(req->count_c, pce_dev->iobase + CRYPTO_CNTR0_IV0_REG);

	/* write cntr1_iv1 for nPkts, and bearer; a single packet is
	 * encoded as 0 in the packet-count field */
	if (npkts == 1)
		npkts = 0;
	writel_relaxed(req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
				npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT,
			pce_dev->iobase + CRYPTO_CNTR1_IV1_REG);

	/* issue go to crypto */
	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);

	/*
	 * barrier to ensure previous instructions
	 * (including GO) to CE finish before issue DMA transfer
	 * request.
	 */
	mb();
	return 0;
};
2012
/*
 * qce_aead_req() - issue a combined cipher + authentication (AEAD)
 * request to the crypto engine.
 *
 * Builds the ADM input chain as [assoc data | cipher IV | src data |
 * pad] and the output chain as [ignore(assoc+IV) | dst data | pad],
 * padding the total to the 16-byte CE block size, then programs the
 * engine and starts both DMA channels.  Completion is reported
 * asynchronously through q_req->qce_cb.
 *
 * Return: 0 if the request was queued; negative errno on failure, in
 * which case all DMA mappings established so far are undone.
 */
int qce_aead_req(void *handle, struct qce_req *q_req)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	struct aead_request *areq = (struct aead_request *) q_req->areq;
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	uint32_t ivsize = crypto_aead_ivsize(aead);
	uint32_t totallen;
	uint32_t pad_len;
	uint32_t authsize = crypto_aead_authsize(aead);
	int rc = 0;

	q_req->ivsize = ivsize;
	/* on decrypt, the trailing auth tag is not part of the cipher text */
	if (q_req->dir == QCE_ENCRYPT)
		q_req->cryptlen = areq->cryptlen;
	else
		q_req->cryptlen = areq->cryptlen - authsize;

	/* total bytes fed to the engine, padded to the ADM block size */
	totallen = q_req->cryptlen + ivsize + areq->assoclen;
	pad_len = ALIGN(totallen, ADM_CE_BLOCK_SIZE) - totallen;

	_chain_buffer_in_init(pce_dev);
	_chain_buffer_out_init(pce_dev);

	/* reset mapping state so 'bad:' only unmaps what was mapped */
	pce_dev->assoc_nents = 0;
	pce_dev->phy_iv_in = 0;
	pce_dev->src_nents = 0;
	pce_dev->dst_nents = 0;

	/* associated data, input only (not ciphered) */
	pce_dev->assoc_nents = count_sg(areq->assoc, areq->assoclen);
	dma_map_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
					 DMA_TO_DEVICE);
	if (_chain_sg_buffer_in(pce_dev, areq->assoc, areq->assoclen) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* cipher iv for input */
	pce_dev->phy_iv_in = dma_map_single(pce_dev->pdev, q_req->iv,
			ivsize, DMA_TO_DEVICE);
	if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_iv_in, ivsize) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* for output, ignore associated data and cipher iv */
	if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_out_ignore,
				ivsize + areq->assoclen) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* cipher input; bidirectional mapping when operating in place */
	pce_dev->src_nents = count_sg(areq->src, q_req->cryptlen);
	dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
							DMA_TO_DEVICE);
	if (_chain_sg_buffer_in(pce_dev, areq->src, q_req->cryptlen) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* cipher output (dst mapped only when distinct from src) */
	if (areq->src != areq->dst) {
		pce_dev->dst_nents = count_sg(areq->dst, q_req->cryptlen);
		dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
				DMA_FROM_DEVICE);
	};
	if (_chain_sg_buffer_out(pce_dev, areq->dst, q_req->cryptlen) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* pad data on both channels up to the aligned total */
	if (pad_len) {
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
						pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
						pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* finalize the ce_in and ce_out channels command lists */
	_ce_in_final(pce_dev, 1, ALIGN(totallen, ADM_CE_BLOCK_SIZE));
	_ce_out_final(pce_dev, 2, ALIGN(totallen, ADM_CE_BLOCK_SIZE));

	/* set up crypto device */
	rc = _ce_setup(pce_dev, q_req, totallen, ivsize + areq->assoclen);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = q_req->areq;
	pce_dev->qce_cb = q_req->qce_cb;

	pce_dev->chan_ce_in_cmd->complete_func = _aead_ce_in_call_back;
	pce_dev->chan_ce_out_cmd->complete_func = _aead_ce_out_call_back;

	rc = _qce_start_dma(pce_dev, true, true);
	if (rc == 0)
		return 0;
bad:
	/* unwind only the mappings that were actually established */
	if (pce_dev->assoc_nents) {
		dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
				DMA_TO_DEVICE);
	}
	if (pce_dev->phy_iv_in) {
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
				ivsize, DMA_TO_DEVICE);
	}
	if (pce_dev->src_nents) {
		dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
				(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
								DMA_TO_DEVICE);
	}
	if (pce_dev->dst_nents) {
		dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
				DMA_FROM_DEVICE);
	}
	return rc;
}
EXPORT_SYMBOL(qce_aead_req);
2139
/*
 * qce_ablk_cipher_req() - issue an asynchronous block cipher request.
 *
 * Chains the source and destination scatterlists to the ADM input and
 * output channels, padding to the 16-byte CE block size.  Buffers are
 * either DMA-mapped normally or, when c_req->use_pmem == 1, translated
 * from pre-pinned pmem regions supplied by userspace (qcedev path).
 * Completion is reported asynchronously through c_req->qce_cb.
 *
 * Return: 0 if queued; negative errno on failure (non-pmem mappings
 * are unwound; pmem buffers need no unmapping here).
 */
int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
{
	int rc = 0;
	struct qce_device *pce_dev = (struct qce_device *) handle;
	struct ablkcipher_request *areq = (struct ablkcipher_request *)
						c_req->areq;

	uint32_t pad_len = ALIGN(areq->nbytes, ADM_CE_BLOCK_SIZE)
						- areq->nbytes;

	_chain_buffer_in_init(pce_dev);
	_chain_buffer_out_init(pce_dev);

	/* reset so the error path only unmaps what was mapped */
	pce_dev->src_nents = 0;
	pce_dev->dst_nents = 0;
	/* cipher input */
	pce_dev->src_nents = count_sg(areq->src, areq->nbytes);

	/* pmem buffers are already physically contiguous/pinned */
	if (c_req->use_pmem != 1)
		dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
							DMA_TO_DEVICE);
	else
		dma_map_pmem_sg(&c_req->pmem->src[0], pce_dev->src_nents,
								areq->src);

	if (_chain_sg_buffer_in(pce_dev, areq->src, areq->nbytes) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* cipher output (dst mapped only when distinct from src) */
	if (areq->src != areq->dst) {
		pce_dev->dst_nents = count_sg(areq->dst, areq->nbytes);
		if (c_req->use_pmem != 1)
			dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
							DMA_FROM_DEVICE);
		else
			dma_map_pmem_sg(&c_req->pmem->dst[0],
					pce_dev->dst_nents, areq->dst);
	};
	if (_chain_sg_buffer_out(pce_dev, areq->dst, areq->nbytes) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* pad data on both channels up to the aligned size */
	if (pad_len) {
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
						pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
						pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* finalize the ce_in and ce_out channels command lists */
	_ce_in_final(pce_dev, 1, areq->nbytes + pad_len);
	_ce_out_final(pce_dev, 1, areq->nbytes + pad_len);

#ifdef QCE_DEBUG
	_ce_in_dump(pce_dev);
	_ce_out_dump(pce_dev);
#endif
	/* set up crypto device */
	rc = _ce_setup(pce_dev, c_req, areq->nbytes, 0);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = areq;
	pce_dev->qce_cb = c_req->qce_cb;
	/* pmem completions skip the dma_unmap_sg done by the normal path */
	if (c_req->use_pmem == 1) {
		pce_dev->chan_ce_in_cmd->complete_func =
					_ablk_cipher_ce_in_call_back_pmem;
		pce_dev->chan_ce_out_cmd->complete_func =
					_ablk_cipher_ce_out_call_back_pmem;
	} else {
		pce_dev->chan_ce_in_cmd->complete_func =
					_ablk_cipher_ce_in_call_back;
		pce_dev->chan_ce_out_cmd->complete_func =
					_ablk_cipher_ce_out_call_back;
	}
	rc = _qce_start_dma(pce_dev, true, true);

	if (rc == 0)
		return 0;
bad:
	/* only non-pmem mappings need to be unwound */
	if (c_req->use_pmem != 1) {
		if (pce_dev->dst_nents) {
			dma_unmap_sg(pce_dev->pdev, areq->dst,
			pce_dev->dst_nents, DMA_FROM_DEVICE);
		}
		if (pce_dev->src_nents) {
			dma_unmap_sg(pce_dev->pdev, areq->src,
					pce_dev->src_nents,
					(areq->src == areq->dst) ?
						DMA_BIDIRECTIONAL :
						DMA_TO_DEVICE);
		}
	}
	return rc;
}
EXPORT_SYMBOL(qce_ablk_cipher_req);
2248
/*
 * qce_process_sha_req() - issue a SHA digest request to the engine.
 *
 * Hash is input-only: the source scatterlist (plus padding up to the
 * 16-byte CE block size) is chained into the ADM input channel only,
 * and the digest is collected by the completion callback.  Completion
 * is reported asynchronously through sreq->qce_cb.
 *
 * Return: 0 if queued; negative errno on failure (source mapping is
 * unwound).
 */
int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	int rc;
	uint32_t pad_len = ALIGN(sreq->size, ADM_CE_BLOCK_SIZE) - sreq->size;
	struct ahash_request *areq = (struct ahash_request *)sreq->areq;

	_chain_buffer_in_init(pce_dev);
	pce_dev->src_nents = count_sg(sreq->src, sreq->size);
	dma_map_sg(pce_dev->pdev, sreq->src, pce_dev->src_nents,
							DMA_TO_DEVICE);

	if (_chain_sg_buffer_in(pce_dev, sreq->src, sreq->size) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* pad the input up to the aligned size */
	if (pad_len) {
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
						pad_len) < 0) {
				rc = -ENOMEM;
				goto bad;
			}
	}
	_ce_in_final(pce_dev, 2, sreq->size + pad_len);

#ifdef QCE_DEBUG
	_ce_in_dump(pce_dev);
#endif

	/* program the CE auth registers and issue GO */
	rc =  _sha_ce_setup(pce_dev, sreq);

	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = areq;
	pce_dev->qce_cb = sreq->qce_cb;
	pce_dev->chan_ce_in_cmd->complete_func = _sha_ce_in_call_back;

	/* input channel only; no output DMA for hashing */
	rc =  _qce_start_dma(pce_dev, true, false);

	if (rc == 0)
		return 0;
bad:
	if (pce_dev->src_nents) {
		dma_unmap_sg(pce_dev->pdev, sreq->src,
				pce_dev->src_nents, DMA_TO_DEVICE);
	}

	return rc;
}
EXPORT_SYMBOL(qce_process_sha_req);
2301
2302/*
2303 * crypto engine open function.
2304 */
2305void *qce_open(struct platform_device *pdev, int *rc)
2306{
2307 struct qce_device *pce_dev;
2308 struct resource *resource;
2309 struct clk *ce_clk;
2310
2311 pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
2312 if (!pce_dev) {
2313 *rc = -ENOMEM;
2314 dev_err(&pdev->dev, "Can not allocate memory\n");
2315 return NULL;
2316 }
2317 pce_dev->pdev = &pdev->dev;
Matt Wagantallc4b3a4d2011-08-17 16:58:39 -07002318 ce_clk = clk_get(pce_dev->pdev, "core_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002319 if (IS_ERR(ce_clk)) {
Mona Hossaina8657d82011-07-11 16:30:08 -07002320 kfree(pce_dev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002321 *rc = PTR_ERR(ce_clk);
2322 return NULL;
2323 }
2324 pce_dev->ce_clk = ce_clk;
Mona Hossaina8657d82011-07-11 16:30:08 -07002325 *rc = clk_enable(pce_dev->ce_clk);
2326 if (*rc) {
2327 kfree(pce_dev);
2328 return NULL;
2329 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002330
2331 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2332 if (!resource) {
2333 *rc = -ENXIO;
2334 dev_err(pce_dev->pdev, "Missing MEM resource\n");
2335 goto err;
2336 };
2337 pce_dev->phy_iobase = resource->start;
2338 pce_dev->iobase = ioremap_nocache(resource->start,
2339 resource->end - resource->start + 1);
2340 if (!pce_dev->iobase) {
2341 *rc = -ENOMEM;
2342 dev_err(pce_dev->pdev, "Can not map io memory\n");
2343 goto err;
2344 }
2345
2346 pce_dev->chan_ce_in_cmd = kzalloc(sizeof(struct msm_dmov_cmd),
2347 GFP_KERNEL);
2348 pce_dev->chan_ce_out_cmd = kzalloc(sizeof(struct msm_dmov_cmd),
2349 GFP_KERNEL);
2350 if (pce_dev->chan_ce_in_cmd == NULL ||
2351 pce_dev->chan_ce_out_cmd == NULL) {
2352 dev_err(pce_dev->pdev, "Can not allocate memory\n");
2353 *rc = -ENOMEM;
2354 goto err;
2355 }
2356
2357 resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
2358 "crypto_channels");
2359 if (!resource) {
2360 *rc = -ENXIO;
2361 dev_err(pce_dev->pdev, "Missing DMA channel resource\n");
2362 goto err;
2363 };
2364 pce_dev->chan_ce_in = resource->start;
2365 pce_dev->chan_ce_out = resource->end;
2366 resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
2367 "crypto_crci_in");
2368 if (!resource) {
2369 *rc = -ENXIO;
2370 dev_err(pce_dev->pdev, "Missing DMA crci in resource\n");
2371 goto err;
2372 };
2373 pce_dev->crci_in = resource->start;
2374 resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
2375 "crypto_crci_out");
2376 if (!resource) {
2377 *rc = -ENXIO;
2378 dev_err(pce_dev->pdev, "Missing DMA crci out resource\n");
2379 goto err;
2380 };
2381 pce_dev->crci_out = resource->start;
2382 resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
2383 "crypto_crci_hash");
2384 if (!resource) {
2385 *rc = -ENXIO;
2386 dev_err(pce_dev->pdev, "Missing DMA crci hash resource\n");
2387 goto err;
2388 };
2389 pce_dev->crci_hash = resource->start;
2390 pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
Mona Hossaind90ea0e2011-08-11 16:51:07 -07002391 2*PAGE_SIZE, &pce_dev->coh_pmem, GFP_KERNEL);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002392
2393 if (pce_dev->coh_vmem == NULL) {
2394 *rc = -ENOMEM;
2395 dev_err(pce_dev->pdev, "Can not allocate coherent memory.\n");
2396 goto err;
2397 }
2398 _setup_cmd_template(pce_dev);
2399
2400 pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
2401 pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
2402
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002403 if (_init_ce_engine(pce_dev)) {
2404 *rc = -ENXIO;
2405 clk_disable(pce_dev->ce_clk);
2406 goto err;
2407 }
2408 *rc = 0;
2409 clk_disable(pce_dev->ce_clk);
2410
2411 pce_dev->err = 0;
2412
2413 return pce_dev;
2414err:
2415 if (pce_dev)
2416 qce_close(pce_dev);
2417 return NULL;
2418}
2419EXPORT_SYMBOL(qce_open);
2420
2421/*
2422 * crypto engine close function.
2423 */
2424int qce_close(void *handle)
2425{
2426 struct qce_device *pce_dev = (struct qce_device *) handle;
2427
2428 if (handle == NULL)
2429 return -ENODEV;
2430 if (pce_dev->iobase)
2431 iounmap(pce_dev->iobase);
2432
2433 if (pce_dev->coh_vmem)
Mona Hossaind90ea0e2011-08-11 16:51:07 -07002434 dma_free_coherent(pce_dev->pdev, 2*PAGE_SIZE, pce_dev->coh_vmem,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002435 pce_dev->coh_pmem);
2436 kfree(pce_dev->chan_ce_in_cmd);
2437 kfree(pce_dev->chan_ce_out_cmd);
2438
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002439 clk_put(pce_dev->ce_clk);
Mona Hossain451cf982011-07-13 11:48:14 -07002440 kfree(handle);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002441 return 0;
2442}
2443EXPORT_SYMBOL(qce_close);
2444
2445int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
2446{
2447 struct qce_device *pce_dev = (struct qce_device *) handle;
2448
2449 if (ce_support == NULL)
2450 return -EINVAL;
2451
2452 if (pce_dev->hmac == 1)
2453 ce_support->sha1_hmac_20 = true;
2454 else
2455 ce_support->sha1_hmac_20 = false;
2456 ce_support->sha1_hmac = false;
2457 ce_support->sha256_hmac = false;
2458 ce_support->sha_hmac = false;
2459 ce_support->cmac = false;
2460 ce_support->aes_key_192 = true;
2461 ce_support->aes_xts = false;
2462 ce_support->aes_ccm = false;
2463 ce_support->ota = pce_dev->ota;
2464 return 0;
2465}
2466EXPORT_SYMBOL(qce_hw_support);
2467
/*
 * qce_f8_req() - issue a single-packet OTA f8 (confidentiality)
 * cipher request, or generate raw key stream.
 *
 * When req->data_in is NULL the engine runs in key-stream mode: no
 * input DMA channel is used and only the output (key stream) is
 * transferred.  Otherwise data_in is ciphered to data_out (in place
 * if they are the same buffer).  Completion is reported through
 * qce_cb with @cookie.
 *
 * Return: 0 if queued; negative errno on failure with mappings undone.
 */
int qce_f8_req(void *handle, struct qce_f8_req *req,
			void *cookie, qce_comp_func_ptr_t qce_cb)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	bool key_stream_mode;
	dma_addr_t dst;
	int rc;
	uint32_t pad_len = ALIGN(req->data_len, ADM_CE_BLOCK_SIZE) -
						req->data_len;

	_chain_buffer_in_init(pce_dev);
	_chain_buffer_out_init(pce_dev);

	/* NULL input requests pure key-stream generation */
	key_stream_mode = (req->data_in == NULL);

	/* F8 cipher input */
	if (key_stream_mode)
		pce_dev->phy_ota_src = 0;
	else {
		pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev,
					req->data_in, req->data_len,
					(req->data_in == req->data_out) ?
					DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src,
				req->data_len) < 0) {
			/* dst not mapped yet; clear stale value for 'bad:' */
			pce_dev->phy_ota_dst = 0;
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* F8 cipher output; phy_ota_dst == 0 means "same as src" */
	if (req->data_in != req->data_out) {
		dst = dma_map_single(pce_dev->pdev, req->data_out,
				req->data_len, DMA_FROM_DEVICE);
		pce_dev->phy_ota_dst = dst;
	} else {
		dst = pce_dev->phy_ota_src;
		pce_dev->phy_ota_dst = 0;
	}
	if (_chain_pm_buffer_out(pce_dev, dst, req->data_len) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	pce_dev->ota_size = req->data_len;

	/* pad data (input channel only used outside key-stream mode) */
	if (pad_len) {
		if (!key_stream_mode && _chain_pm_buffer_in(pce_dev,
				pce_dev->phy_ce_pad, pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
					pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* finalize the ce_in and ce_out channels command lists */
	if (!key_stream_mode)
		_ce_in_final(pce_dev, 1, req->data_len + pad_len);
	_ce_out_final(pce_dev, 1, req->data_len + pad_len);

	/* set up crypto device: single packet, cipher from offset 0 */
	rc = _ce_f8_setup(pce_dev, req, key_stream_mode, 1, 0, req->data_len);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = cookie;
	pce_dev->qce_cb = qce_cb;

	if (!key_stream_mode)
		pce_dev->chan_ce_in_cmd->complete_func = _f8_ce_in_call_back;

	pce_dev->chan_ce_out_cmd->complete_func = _f8_ce_out_call_back;

	/* input DMA channel is started only when there is input data */
	rc = _qce_start_dma(pce_dev, !(key_stream_mode), true);
	if (rc == 0)
		return 0;
bad:
	if (pce_dev->phy_ota_dst != 0)
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst,
				req->data_len, DMA_FROM_DEVICE);
	if (pce_dev->phy_ota_src != 0)
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
				req->data_len,
				(req->data_in == req->data_out) ?
					DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	return rc;
}
EXPORT_SYMBOL(qce_f8_req);
2563
/*
 * qce_f8_multi_pkt_req() - issue an OTA f8 request covering several
 * equal-length packets in one pass.
 *
 * All num_pkt packets are laid out back to back in data_in/data_out;
 * the engine ciphers the [cipher_start, cipher_start + cipher_size)
 * region of each packet.  Completion is reported through qce_cb with
 * @cookie.
 *
 * Return: 0 if queued; negative errno on failure with mappings undone.
 */
int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq,
			void *cookie, qce_comp_func_ptr_t qce_cb)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	uint16_t num_pkt = mreq->num_pkt;
	uint16_t cipher_start = mreq->cipher_start;
	uint16_t cipher_size = mreq->cipher_size;
	struct qce_f8_req *req = &mreq->qce_f8_req;
	uint32_t total;
	uint32_t pad_len;
	dma_addr_t dst = 0;
	int rc = 0;

	/* packets are contiguous: total DMA length spans them all */
	total = num_pkt *  req->data_len;
	pad_len = ALIGN(total, ADM_CE_BLOCK_SIZE) - total;

	_chain_buffer_in_init(pce_dev);
	_chain_buffer_out_init(pce_dev);

	/* F8 cipher input */
	pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev,
				req->data_in, total,
				(req->data_in == req->data_out) ?
				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src,
			total) < 0) {
		/* dst not mapped yet; clear stale value for 'bad:' */
		pce_dev->phy_ota_dst = 0;
		rc = -ENOMEM;
		goto bad;
	}
	/* F8 cipher output; phy_ota_dst == 0 means in-place operation */
	if (req->data_in != req->data_out) {
		dst = dma_map_single(pce_dev->pdev, req->data_out, total,
				DMA_FROM_DEVICE);
		pce_dev->phy_ota_dst = dst;
	} else {
		dst = pce_dev->phy_ota_src;
		pce_dev->phy_ota_dst = 0;
	}
	if (_chain_pm_buffer_out(pce_dev, dst, total) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	pce_dev->ota_size = total;

	/* pad data on both channels up to the aligned total */
	if (pad_len) {
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
					pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
					pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* finalize the ce_in and ce_out channels command lists */
	_ce_in_final(pce_dev, 1, total + pad_len);
	_ce_out_final(pce_dev, 1, total + pad_len);


	/* set up crypto device */
	rc = _ce_f8_setup(pce_dev, req, false, num_pkt, cipher_start,
			cipher_size);
	if (rc)
		goto bad ;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = cookie;
	pce_dev->qce_cb = qce_cb;

	pce_dev->chan_ce_in_cmd->complete_func = _f8_ce_in_call_back;
	pce_dev->chan_ce_out_cmd->complete_func = _f8_ce_out_call_back;

	rc = _qce_start_dma(pce_dev, true, true);
	if (rc == 0)
		return 0;
bad:
	/* src is always mapped by the time we can reach here */
	if (pce_dev->phy_ota_dst)
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst, total,
				DMA_FROM_DEVICE);
	dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src, total,
				(req->data_in == req->data_out) ?
				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	return rc;
}
EXPORT_SYMBOL(qce_f8_multi_pkt_req);
2655
/*
 * qce_f9_req() - issue an OTA f9 (3GPP integrity / MAC-I) request.
 *
 * The message is input-only: it is chained (with padding up to the
 * 16-byte CE block size) into the ADM input channel, and the computed
 * MAC is collected by the completion callback.  Completion is
 * reported through qce_cb with @cookie.
 *
 * Return: 0 if queued; negative errno on failure with the message
 * mapping undone.
 */
int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie,
			qce_comp_func_ptr_t qce_cb)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	int rc;
	uint32_t pad_len = ALIGN(req->msize, ADM_CE_BLOCK_SIZE) - req->msize;

	pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev, req->message,
			req->msize, DMA_TO_DEVICE);

	_chain_buffer_in_init(pce_dev);
	rc = _chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src, req->msize);
	if (rc < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	pce_dev->ota_size = req->msize;
	/* pad the input up to the aligned size */
	if (pad_len) {
		rc = _chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
				pad_len);
		if (rc < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}
	_ce_in_final(pce_dev, 2, req->msize + pad_len);
	rc = _ce_f9_setup(pce_dev, req);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = cookie;
	pce_dev->qce_cb = qce_cb;

	pce_dev->chan_ce_in_cmd->complete_func = _f9_ce_in_call_back;

	/* input channel only; no output DMA for integrity */
	rc =  _qce_start_dma(pce_dev, true, false);
	if (rc == 0)
		return 0;
bad:
	dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
				req->msize, DMA_TO_DEVICE);
	return rc;
}
EXPORT_SYMBOL(qce_f9_req);
2702
2703MODULE_LICENSE("GPL v2");
2704MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
2705MODULE_DESCRIPTION("Crypto Engine driver");
Mona Hossaind90ea0e2011-08-11 16:51:07 -07002706MODULE_VERSION("1.14");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002707