/*
 * Support for Marvell's crypto engine which can be found on some Orion5X
 * boards.
 *
 * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 * License: GPLv2
 *
 */
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include "mv_cesa.h"
/*
 * STM:
 *   /---------------------------------------\
 *   |                                       | request complete
 *  \./                                      |
 * IDLE -> new request -> BUSY -> done -> DEQUEUE
 *                         /°\               |
 *                          |                | more scatter entries
 *                          \________________/
 */
enum engine_status {
	ENGINE_IDLE,
	ENGINE_BUSY,
	ENGINE_W_DEQUEUE,
};

/**
 * struct req_progress - used for every crypt request
 * @src_sg_it: sg iterator for src
 * @dst_sg_it: sg iterator for dst
 * @sg_src_left: bytes left in src to process (scatter list)
 * @src_start: offset to add to src start position (scatter list)
 * @crypt_len: length of current crypt process
 * @sg_dst_left: bytes left in dst to process (scatter list)
 * @dst_start: offset to add to dst start position (scatter list)
 * @total_req_bytes: total number of bytes processed (request).
 *
 * sg helpers are used to iterate over the scatterlist. Since the size of the
 * SRAM may be less than the scatter size, this struct is used to keep
 * track of progress within the current scatterlist.
 */
struct req_progress {
	struct sg_mapping_iter src_sg_it;
	struct sg_mapping_iter dst_sg_it;

	/* src mostly */
	int sg_src_left;
	int src_start;
	int crypt_len;
	/* dst mostly */
	int sg_dst_left;
	int dst_start;
	int total_req_bytes;
};

struct crypto_priv {
	void __iomem *reg;
	void __iomem *sram;
	int irq;
	struct task_struct *queue_th;

	/* the lock protects queue and eng_st */
	spinlock_t lock;
	struct crypto_queue queue;
	enum engine_status eng_st;
	struct ablkcipher_request *cur_req;
	struct req_progress p;
	int max_req_size;
	int sram_size;
};

static struct crypto_priv *cpg;

struct mv_ctx {
	u8 aes_enc_key[AES_KEY_LEN];
	u32 aes_dec_key[8];
	int key_len;
	u32 need_calc_aes_dkey;
};

enum crypto_op {
	COP_AES_ECB,
	COP_AES_CBC,
};

struct mv_req_ctx {
	enum crypto_op op;
	int decrypt;
};

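/*
 * Derive the key material used for decryption from the tail of the expanded
 * AES encryption key schedule produced by crypto_aes_expand_key(). This is
 * computed lazily: only once a decrypt request is actually submitted (see
 * need_calc_aes_dkey).
 */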
static void compute_aes_dec_key(struct mv_ctx *ctx)
{
	struct crypto_aes_ctx gen_aes_key;
	int key_pos;

	if (!ctx->need_calc_aes_dkey)
		return;

	crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);

	key_pos = ctx->key_len + 24;
	memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
	switch (ctx->key_len) {
	case AES_KEYSIZE_256:
		key_pos -= 2;
		/* fall through */
	case AES_KEYSIZE_192:
		key_pos -= 2;
		memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
				4 * 4);
		break;
	}
	ctx->need_calc_aes_dkey = 0;
}

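/*
 * Record the caller's key and key length. The decryption variant is not
 * derived here; compute_aes_dec_key() does that on demand.
 */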
static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
		unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_ctx *ctx = crypto_tfm_ctx(tfm);

	switch (len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->key_len = len;
	ctx->need_calc_aes_dkey = 1;

	/* only copy the bytes the caller actually supplied */
	memcpy(ctx->aes_enc_key, key, len);
	return 0;
}

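/*
 * Copy the next chunk of the source scatterlist into the engine's SRAM input
 * buffer, limited by what fits (max_req_size), and advance the per-request
 * progress counters accordingly.
 */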
static void setup_data_in(struct ablkcipher_request *req)
{
	int ret;
	void *buf;

	if (!cpg->p.sg_src_left) {
		ret = sg_miter_next(&cpg->p.src_sg_it);
		BUG_ON(!ret);
		cpg->p.sg_src_left = cpg->p.src_sg_it.length;
		cpg->p.src_start = 0;
	}

	cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size);

	buf = cpg->p.src_sg_it.addr;
	buf += cpg->p.src_start;

	memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len);

	cpg->p.sg_src_left -= cpg->p.crypt_len;
	cpg->p.src_start += cpg->p.crypt_len;
}

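/*
 * Program one crypto operation: build the security accelerator descriptor
 * (cipher mode, direction, key and, for CBC, the IV) in SRAM, stage the next
 * chunk of input data and kick the engine. Completion is signalled via the
 * accelerator interrupt.
 */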
static void mv_process_current_q(int first_block)
{
	struct ablkcipher_request *req = cpg->cur_req;
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct sec_accel_config op;

	switch (req_ctx->op) {
	case COP_AES_ECB:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
		break;
	case COP_AES_CBC:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
		op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
			ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
		if (first_block)
			memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
		break;
	}
	if (req_ctx->decrypt) {
		op.config |= CFG_DIR_DEC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
				AES_KEY_LEN);
	} else {
		op.config |= CFG_DIR_ENC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
				AES_KEY_LEN);
	}

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		op.config |= CFG_AES_LEN_128;
		break;
	case AES_KEYSIZE_192:
		op.config |= CFG_AES_LEN_192;
		break;
	case AES_KEYSIZE_256:
		op.config |= CFG_AES_LEN_256;
		break;
	}
	op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
		ENC_P_DST(SRAM_DATA_OUT_START);
	op.enc_key_p = SRAM_DATA_KEY_P;

	setup_data_in(req);
	op.enc_len = cpg->p.crypt_len;
	memcpy(cpg->sram + SRAM_CONFIG, &op,
			sizeof(struct sec_accel_config));

	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
	/* GO */
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);

	/*
	 * XXX: add timer if the interrupt does not occur for some mystery
	 * reason
	 */
}

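/*
 * Per-algorithm completion work. For CBC the engine leaves the updated IV in
 * SRAM_DATA_IV_BUF; copy it back to req->info so chained requests see it.
 * ECB has nothing to do here.
 */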
static void mv_crypto_algo_completion(void)
{
	struct ablkcipher_request *req = cpg->cur_req;
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	if (req_ctx->op != COP_AES_CBC)
		return;

	memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
}

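/*
 * Called from the queue thread once the engine has signalled completion:
 * copy the processed chunk from SRAM back into the destination scatterlist,
 * then either start on the next chunk or finish the request and complete it
 * towards the crypto API.
 */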
static void dequeue_complete_req(void)
{
	struct ablkcipher_request *req = cpg->cur_req;
	void *buf;
	int out_offset = 0;
	int ret;

	cpg->p.total_req_bytes += cpg->p.crypt_len;
	do {
		int dst_copy;

		if (!cpg->p.sg_dst_left) {
			ret = sg_miter_next(&cpg->p.dst_sg_it);
			BUG_ON(!ret);
			cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
			cpg->p.dst_start = 0;
		}

		buf = cpg->p.dst_sg_it.addr;
		buf += cpg->p.dst_start;

		dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);

		/* continue from where the previous copy stopped in SRAM */
		memcpy(buf, cpg->sram + SRAM_DATA_OUT_START + out_offset,
				dst_copy);
		out_offset += dst_copy;

		cpg->p.sg_dst_left -= dst_copy;
		cpg->p.crypt_len -= dst_copy;
		cpg->p.dst_start += dst_copy;
	} while (cpg->p.crypt_len > 0);

	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
	if (cpg->p.total_req_bytes < req->nbytes) {
		/* process next scatter list entry */
		cpg->eng_st = ENGINE_BUSY;
		mv_process_current_q(0);
	} else {
		sg_miter_stop(&cpg->p.src_sg_it);
		sg_miter_stop(&cpg->p.dst_sg_it);
		mv_crypto_algo_completion();
		cpg->eng_st = ENGINE_IDLE;
		local_bh_disable();
		req->base.complete(&req->base, 0);
		local_bh_enable();
	}
}

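/*
 * Count how many scatterlist entries are needed to cover total_bytes, so the
 * sg_miter iterators can be started with the right entry count.
 */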
static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
	int i = 0;

	do {
		/* the last entry may be longer than what is left to process */
		if (sl[i].length >= total_bytes)
			total_bytes = 0;
		else
			total_bytes -= sl[i].length;
		i++;
	} while (total_bytes > 0);

	return i;
}

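/*
 * Make req the engine's current request: reset the progress state, start the
 * src/dst scatterlist iterators and program the first chunk.
 */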
static void mv_enqueue_new_req(struct ablkcipher_request *req)
{
	int num_sgs;

	cpg->cur_req = req;
	memset(&cpg->p, 0, sizeof(struct req_progress));

	num_sgs = count_sgs(req->src, req->nbytes);
	sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);

	num_sgs = count_sgs(req->dst, req->nbytes);
	sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
	mv_process_current_q(1);
}

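/*
 * Kernel thread driving the engine. It finishes the chunk the engine just
 * completed (ENGINE_W_DEQUEUE) and, while idle, pulls the next request off
 * the crypto queue and feeds it to the hardware. Woken from the interrupt
 * handler and from mv_handle_req().
 */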
static int queue_manag(void *data)
{
	cpg->eng_st = ENGINE_IDLE;
	do {
		struct ablkcipher_request *req;
		struct crypto_async_request *async_req = NULL;
		struct crypto_async_request *backlog = NULL;

		__set_current_state(TASK_INTERRUPTIBLE);

		if (cpg->eng_st == ENGINE_W_DEQUEUE)
			dequeue_complete_req();

		spin_lock_irq(&cpg->lock);
		if (cpg->eng_st == ENGINE_IDLE) {
			backlog = crypto_get_backlog(&cpg->queue);
			async_req = crypto_dequeue_request(&cpg->queue);
			if (async_req) {
				BUG_ON(cpg->eng_st != ENGINE_IDLE);
				cpg->eng_st = ENGINE_BUSY;
			}
		}
		spin_unlock_irq(&cpg->lock);

		if (backlog) {
			backlog->complete(backlog, -EINPROGRESS);
			backlog = NULL;
		}

		if (async_req) {
			req = container_of(async_req,
					struct ablkcipher_request, base);
			mv_enqueue_new_req(req);
			async_req = NULL;
		}

		schedule();

	} while (!kthread_should_stop());
	return 0;
}

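/*
 * Queue a request for the engine and wake the queue thread. Returns the
 * status reported by the crypto queue (typically -EINPROGRESS).
 */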
static int mv_handle_req(struct ablkcipher_request *req)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cpg->lock, flags);
	ret = ablkcipher_enqueue_request(&cpg->queue, req);
	spin_unlock_irqrestore(&cpg->lock, flags);
	wake_up_process(cpg->queue_th);
	return ret;
}

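/*
 * ablkcipher entry points: note the requested mode and direction in the
 * request context, compute the decryption key where necessary and hand the
 * request over to the queue.
 */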
static int mv_enc_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 0;

	return mv_handle_req(req);
}

static int mv_dec_aes_ecb(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_ECB;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(req);
}

static int mv_enc_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 0;

	return mv_handle_req(req);
}

static int mv_dec_aes_cbc(struct ablkcipher_request *req)
{
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);

	req_ctx->op = COP_AES_CBC;
	req_ctx->decrypt = 1;

	compute_aes_dec_key(ctx);
	return mv_handle_req(req);
}

static int mv_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
	return 0;
}

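/*
 * Accelerator interrupt: acknowledge the "accel 0 done" bit and move the
 * engine to ENGINE_W_DEQUEUE so the queue thread copies the result out.
 */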
static irqreturn_t crypto_int(int irq, void *priv)
{
	u32 val;

	val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
	if (!(val & SEC_INT_ACCEL0_DONE))
		return IRQ_NONE;

	val &= ~SEC_INT_ACCEL0_DONE;
	writel(val, cpg->reg + FPGA_INT_STATUS);
	writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
	BUG_ON(cpg->eng_st != ENGINE_BUSY);
	cpg->eng_st = ENGINE_W_DEQUEUE;
	wake_up_process(cpg->queue_th);
	return IRQ_HANDLED;
}

struct crypto_alg mv_aes_alg_ecb = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "mv-ecb-aes",
	.cra_priority	= 300,
	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize	= AES_BLOCK_SIZE,
	.cra_ctxsize	= sizeof(struct mv_ctx),
	.cra_alignmask	= 0,
	.cra_type	= &crypto_ablkcipher_type,
	.cra_module	= THIS_MODULE,
	.cra_init	= mv_cra_init,
	.cra_u		= {
		.ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= mv_setkey_aes,
			.encrypt	= mv_enc_aes_ecb,
			.decrypt	= mv_dec_aes_ecb,
		},
	},
};

struct crypto_alg mv_aes_alg_cbc = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "mv-cbc-aes",
	.cra_priority	= 300,
	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize	= AES_BLOCK_SIZE,
	.cra_ctxsize	= sizeof(struct mv_ctx),
	.cra_alignmask	= 0,
	.cra_type	= &crypto_ablkcipher_type,
	.cra_module	= THIS_MODULE,
	.cra_init	= mv_cra_init,
	.cra_u		= {
		.ablkcipher = {
			.ivsize		= AES_BLOCK_SIZE,
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= mv_setkey_aes,
			.encrypt	= mv_enc_aes_cbc,
			.decrypt	= mv_dec_aes_cbc,
		},
	},
};

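/*
 * Probe: map the register window and the on-chip SRAM, start the queue
 * thread, hook up the accelerator interrupt and register the two AES
 * algorithms with the crypto API.
 */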
static int mv_probe(struct platform_device *pdev)
{
	struct crypto_priv *cp;
	struct resource *res;
	int irq;
	int ret;

	if (cpg) {
		printk(KERN_ERR "Second crypto dev?\n");
		return -EEXIST;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	if (!res)
		return -ENXIO;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	spin_lock_init(&cp->lock);
	crypto_init_queue(&cp->queue, 50);
	cp->reg = ioremap(res->start, res->end - res->start + 1);
	if (!cp->reg) {
		ret = -ENOMEM;
		goto err;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
	if (!res) {
		ret = -ENXIO;
		goto err_unmap_reg;
	}
	cp->sram_size = res->end - res->start + 1;
	cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
	cp->sram = ioremap(res->start, cp->sram_size);
	if (!cp->sram) {
		ret = -ENOMEM;
		goto err_unmap_reg;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0 || irq == NO_IRQ) {
		ret = (irq < 0) ? irq : -ENXIO;
		goto err_unmap_sram;
	}
	cp->irq = irq;

	platform_set_drvdata(pdev, cp);
	cpg = cp;

	cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
	if (IS_ERR(cp->queue_th)) {
		ret = PTR_ERR(cp->queue_th);
		goto err_unmap_sram;
	}

	ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
			cp);
	if (ret)
		goto err_thread;

	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);

	ret = crypto_register_alg(&mv_aes_alg_ecb);
	if (ret)
		goto err_irq;

	ret = crypto_register_alg(&mv_aes_alg_cbc);
	if (ret)
		goto err_unreg_ecb;
	return 0;
err_unreg_ecb:
	crypto_unregister_alg(&mv_aes_alg_ecb);
err_irq:
	free_irq(irq, cp);
err_thread:
	kthread_stop(cp->queue_th);
err_unmap_sram:
	iounmap(cp->sram);
err_unmap_reg:
	iounmap(cp->reg);
err:
	kfree(cp);
	cpg = NULL;
	platform_set_drvdata(pdev, NULL);
	return ret;
}

static int mv_remove(struct platform_device *pdev)
{
	struct crypto_priv *cp = platform_get_drvdata(pdev);

	crypto_unregister_alg(&mv_aes_alg_ecb);
	crypto_unregister_alg(&mv_aes_alg_cbc);
	kthread_stop(cp->queue_th);
	free_irq(cp->irq, cp);
	memset(cp->sram, 0, cp->sram_size);
	iounmap(cp->sram);
	iounmap(cp->reg);
	kfree(cp);
	cpg = NULL;
	return 0;
}

static struct platform_driver marvell_crypto = {
	.probe		= mv_probe,
	.remove		= mv_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "mv_crypto",
	},
};
MODULE_ALIAS("platform:mv_crypto");

static int __init mv_crypto_init(void)
{
	return platform_driver_register(&marvell_crypto);
}
module_init(mv_crypto_init);

static void __exit mv_crypto_exit(void)
{
	platform_driver_unregister(&marvell_crypto);
}
module_exit(mv_crypto_exit);

MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL");