/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 *                ---------------                     ---------------
 *                | JobDesc #1  |-------------------->|  ShareDesc  |
 *                | *(packet 1) |                     |  (hashKey)  |
 *                ---------------                     | (operation) |
 *                                                    ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 *                ---------------                     ---------------
 *                | JobDesc #2  |-------------------->|  ShareDesc  |
 *                | *(packet 2) |      |------------->|  (hashKey)  |
 *                ---------------      |         |----| (operation) |
 *                       .             |         |    | (load ctx2) |
 *                       .             |         |    ---------------
 *                ---------------      |         |
 *                | JobDesc #3  |------|         |
 *                | *(packet 3) |                |
 *                ---------------                |
 *                       .                       |
 *                       .                       |
 *                ---------------                |
 *                | JobDesc #4  |----------------|
 *                | *(packet 4) |
 *                ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

/* length of descriptors text */
#define DESC_JOB_IO_LEN			(CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
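
/*
 * DESC_JOB_IO_LEN matches the job descriptor layout sketched in the
 * header comment: five command-sized words (header, SEQ OUT PTR
 * command plus output length, SEQ IN PTR command plus input length)
 * and three pointers (shared descriptor, output buffer, input buffer).
 */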

#define DESC_AHASH_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_LEN		(6 * CAAM_CMD_SZ)
#define DESC_AHASH_UPDATE_FIRST_LEN	(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_FINUP_LEN		(DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_AHASH_DIGEST_LEN		(DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

#ifdef DEBUG
/* for print_hex_dumps with line references */
#define xstr(s) str(s)
#define str(s) #s
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif

/* ahash per-session context */
struct caam_hash_ctx {
	struct device *jrdev;
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN];
	u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN];
	dma_addr_t sh_desc_update_dma;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	dma_addr_t sh_desc_finup_dma;
	u32 alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_HASH_KEY_SIZE];
	dma_addr_t key_dma;
	int ctx_len;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_0;
	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen_1;
	u8 caam_ctx[MAX_CTX_LEN];
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	int current_buf;
};
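
/*
 * buf_0/buf_1 double-buffer the partial block carried between requests:
 * the tail of a new request is staged in one buffer while the other may
 * still be DMA-mapped for the job in flight; current_buf selects the
 * active one.
 */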

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				       struct caam_hash_state *state,
				       int ctx_len)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
}

/* Map req->result, and append seq_out_ptr command that points to it */
static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev,
						u8 *result, int digestsize)
{
	dma_addr_t dst_dma;

	dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);

	return dst_dma;
}

/* Map current buffer in state and put it in link table */
static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
					    struct sec4_sg_entry *sec4_sg,
					    u8 *buf, int buflen)
{
	dma_addr_t buf_dma;

	buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
	dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);

	return buf_dma;
}

/* Map req->src and put it in link table */
static inline void src_map_to_sec4_sg(struct device *jrdev,
				      struct scatterlist *src, int src_nents,
				      struct sec4_sg_entry *sec4_sg)
{
	dma_map_sg(jrdev, src, src_nents, DMA_TO_DEVICE);
	sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
}

/*
 * Only put the buffer in the link table if it actually contains data;
 * either way, a buffer mapped for a previous request may still be
 * DMA-mapped and must be unmapped first.
 */
static inline dma_addr_t
try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
		       u8 *buf, dma_addr_t buf_dma, int buflen,
		       int last_buflen)
{
	if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
		dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
	if (buflen)
		buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
	else
		buf_dma = 0;

	return buf_dma;
}

/* Map state->caam_ctx, and add it to link table */
static inline void ctx_map_to_sec4_sg(struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len,
				      struct sec4_sg_entry *sec4_sg,
				      u32 flag)
{
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
}

/* Common shared descriptor commands */
static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
			  ctx->split_key_len, CLASS_2 |
			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
}

/* Append key if it has been set */
static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
{
	u32 *key_jump_cmd;

	init_sh_desc(desc, HDR_SHARE_WAIT);

	if (ctx->split_key_len) {
		/* Skip if already shared */
		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
					   JUMP_COND_SHRD);

		append_key_ahash(desc, ctx);

		set_jump_tgt_here(desc, key_jump_cmd);
	}

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}

/*
 * For ahash, read data from seqin following state->caam_ctx,
 * and write the resulting class2 context to seqout, which may be
 * state->caam_ctx or req->result
 */
static inline void ahash_append_load_str(u32 *desc, int digestsize)
{
	/* Calculate remaining bytes to read */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
			     FIFOLD_TYPE_MSG | KEY_VLF);

	/* Store class2 context bytes */
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
}

/*
 * For ahash update, final and finup, import context, read and write to seqout
 */
static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
					 int digestsize,
					 struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}

/* For ahash first update (init) and digest, read and write to seqout */
static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
				     int digestsize, struct caam_hash_ctx *ctx)
{
	init_sh_desc_key_ahash(desc, ctx);

	/* Class 2 operation */
	append_operation(desc, op | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 */
	ahash_append_load_str(desc, digestsize);
}
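
/*
 * The two helpers above differ only in whether the running context is
 * seq-loaded before the OPERATION command: ahash_ctx_data_to_out()
 * (update/final/finup) resumes from state->caam_ctx, while
 * ahash_data_to_out() (first update and one-shot digest) starts fresh.
 */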

static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 have_key = 0;
	u32 *desc;

	if (ctx->split_key_len)
		have_key = OP_ALG_AAI_HMAC_PRECOMP;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;

	init_sh_desc(desc, HDR_SHARE_WAIT);

	/* Import context from software */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_2_CCB | ctx->ctx_len);

	/* Class 2 operation */
	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
			 OP_ALG_ENCRYPT);

	/* Load data and write to result or context */
	ahash_append_load_str(desc, ctx->ctx_len);

	ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash update shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
			  ctx->ctx_len, ctx);

	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
						       desc_bytes(desc),
						       DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash update first shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash final shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_finup shared descriptor */
	desc = ctx->sh_desc_finup;

	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
			      OP_ALG_AS_FINALIZE, digestsize, ctx);

	ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
						DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash finup shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;

	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
			  digestsize, ctx);

	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ahash digest shdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}

static u32 gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			      u32 keylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, keylen,
			     ctx->alg_op);
}

/* Digest the key if it is longer than the algorithm's block size */
static u32 hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
			   u32 *keylen, u8 *key_out, u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t src_dma, dst_dma;
	int ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
	if (!desc)
		return -ENOMEM;

	init_job_desc(desc, 0);

	src_dma = dma_map_single(jrdev, (void *)key_in, *keylen,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, src_dma)) {
		dev_err(jrdev, "unable to map key input memory\n");
		kfree(desc);
		return -ENOMEM;
	}
	dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map key output memory\n");
		dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, src_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, dst_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key_in@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (!ret) {
		/* in progress */
		wait_for_completion_interruptible(&result.completion);
		ret = result.err;
#ifdef DEBUG
		print_hex_dump(KERN_ERR, "digested key@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, key_in,
			       digestsize, 1);
#endif
	}
	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);

	/* only shrink *keylen once src_dma was unmapped with the old length */
	*keylen = digestsize;

	kfree(desc);

	return ret;
}

static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ret = 0;
	u8 *hashed_key = NULL;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d\n", keylen);
#endif

	if (keylen > blocksize) {
		hashed_key = kmalloc(sizeof(u8) * digestsize, GFP_KERNEL |
				     GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, key, &keylen, hashed_key,
				      digestsize);
		if (ret)
			goto badkey;
		key = hashed_key;
	}

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
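	/*
	 * e.g. for hmac(sha256) the MDHA pad size is 32, so
	 * split_key_len = 64 and split_key_pad_len = ALIGN(64, 16) = 64;
	 * for hmac(sha1) it is 20, giving 40, padded up to 48.
	 */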

#ifdef DEBUG
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_hash_key(ctx, key, keylen);
	if (ret)
		goto badkey;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		kfree(hashed_key);
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len, 1);
#endif

	ret = ahash_set_sh_desc(ahash);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
				 DMA_TO_DEVICE);
	}

	kfree(hashed_key);
	return ret;
badkey:
	kfree(hashed_key);
	crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @dst_dma: physical mapped address of req->result
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg: pointer to h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ahash_edesc {
	dma_addr_t dst_dma;
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};
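
/*
 * Each request path allocates one buffer laid out as
 *   [struct ahash_edesc][hw_desc: DESC_JOB_IO_LEN bytes][sec4_sg table],
 * which is why sec4_sg below is always derived from the edesc pointer
 * plus sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN.
 */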

static inline void ahash_unmap(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len)
{
	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
	if (edesc->dst_dma)
		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}

static inline void ahash_unmap_ctx(struct device *dev,
			struct ahash_edesc *edesc,
			struct ahash_request *req, int dst_len, u32 flag)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	if (state->ctx_dma)
		dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
	ahash_unmap(dev, edesc, req, dst_len);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
#ifdef DEBUG
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	struct ahash_request *req = context;
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
#ifdef DEBUG
	struct caam_hash_state *state = ahash_request_ctx(req);
	int digestsize = crypto_ahash_digestsize(ahash);

	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ahash_edesc *)((char *)desc -
		 offsetof(struct ahash_edesc, hw_desc));
	if (err) {
		char tmp[CAAM_ERROR_STR_MAX];

		dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
	}

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
		       ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, req->result,
			       digestsize, 1);
#endif

	req->base.complete(&req->base, err);
}

/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1, last_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *sh_desc = ctx->sh_desc_update, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_dma;
	int src_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	last_buflen = *next_buflen;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;
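
	/*
	 * Worked example: with 64-byte blocks, 10 bytes already buffered
	 * and a 100-byte update, in_len = 110, next_buflen = 110 & 63 = 46
	 * and to_hash = 64: one block is hashed (10 bytes from buf, 54
	 * from req->src) and the trailing 46 bytes are staged in the
	 * other buffer.
	 */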

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen));
		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
				 sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);

		ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				   edesc->sec4_sg, DMA_BIDIRECTIONAL);

		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
							edesc->sec4_sg + 1,
							buf, state->buf_dma,
							*buflen, last_buflen);

		if (src_nents) {
			src_map_to_sec4_sg(jrdev, req->src, src_nents,
					   edesc->sec4_sg + sec4_sg_src_index);
			if (*next_buflen) {
				sg_copy_part(next_buf, req->src, to_hash -
					     *buflen, req->nbytes);
				state->current_buf = !state->current_buf;
			}
		} else {
			(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
							SEC4_SG_LEN_FIN;
		}

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req);
		if (!ret) {
			ret = -EINPROGRESS;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_BIDIRECTIONAL);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		sg_copy(buf + *buflen, req->src, req->nbytes);
		*buflen = *next_buflen;
		*next_buflen = last_buflen;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_fin, *desc;
	dma_addr_t ptr = ctx->sh_desc_fin_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	edesc->src_nents = 0;

	ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, edesc->sec4_sg,
			   DMA_TO_DEVICE);

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);
	/* mark the final link-table entry */
	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_finup, *desc;
	dma_addr_t ptr = ctx->sh_desc_finup_dma;
	int sec4_sg_bytes, sec4_sg_src_index;
	int src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	src_nents = __sg_count(req->src, req->nbytes);
	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);

	ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, edesc->sec4_sg,
			   DMA_TO_DEVICE);

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
						buf, state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
			   sec4_sg_src_index);

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
			  buflen + req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
		kfree(edesc);
	}

	return ret;
}

static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, sec4_sg_bytes;
	dma_addr_t src_dma;
	struct ahash_edesc *edesc;
	int ret = 0;
	u32 options;
	int sh_len;

	src_nents = sg_count(req->src, req->nbytes);
	dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
	sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
			DESC_JOB_IO_LEN, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	edesc->src_nents = src_nents;

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

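	/*
	 * sg_count() reports 0 for a single contiguous segment; in that
	 * case the data is referenced directly and no S/G table is used.
	 */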
	if (src_nents) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		src_dma = edesc->sec4_sg_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, options);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);

	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);
	edesc->src_nents = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit ahash update if it is the first job descriptor (no context yet) */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
	u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
	int *next_buflen = state->current_buf ? &state->buflen_0 :
			   &state->buflen_1;
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents;
	struct ahash_edesc *edesc;
	u32 *desc, *sh_desc = ctx->sh_desc_update_first;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	int ret = 0;
	int sh_len;

	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen));
		sec4_sg_bytes = (1 + src_nents) *
				sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);

		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
						    buf, *buflen);
		src_map_to_sec4_sg(jrdev, req->src, src_nents,
				   edesc->sec4_sg + 1);
		if (*next_buflen) {
			sg_copy_part(next_buf, req->src, to_hash - *buflen,
				     req->nbytes);
			state->current_buf = !state->current_buf;
		}

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		sg_copy(buf + *buflen, req->src, req->nbytes);
		*buflen = *next_buflen;
		*next_buflen = 0;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
	print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

/* submit ahash finup if it is the first job descriptor (no context yet) */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
	int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
	int last_buflen = state->current_buf ? state->buflen_0 :
			  state->buflen_1;
	u32 *sh_desc = ctx->sh_desc_digest, *desc;
	dma_addr_t ptr = ctx->sh_desc_digest_dma;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int sh_len;
	int ret = 0;

	src_nents = __sg_count(req->src, req->nbytes);
	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
			 sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return -ENOMEM;
	}

	sh_len = desc_len(sh_desc);
	desc = edesc->hw_desc;
	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
			 DESC_JOB_IO_LEN;
	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);

	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
						state->buf_dma, buflen,
						last_buflen);

	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1);

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
			  req->nbytes, LDST_SGF);

	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
						digestsize);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ahash_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
	}

	return ret;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
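	/*
	 * In the plain init -> update flow current_buf is still 0 here
	 * (set by ahash_init()), so these resolve to buf_0/buflen_0.
	 */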
	u8 *next_buf = state->buf_0 + state->current_buf *
		       CAAM_MAX_HASH_BLOCK_SIZE;
	int *next_buflen = &state->buflen_0 + state->current_buf;
	int to_hash;
	u32 *sh_desc = ctx->sh_desc_update_first, *desc;
	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
	int sec4_sg_bytes, src_nents;
	dma_addr_t src_dma;
	u32 options;
	struct ahash_edesc *edesc;
	int ret = 0;
	int sh_len;

	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		src_nents = sg_count(req->src, req->nbytes - (*next_buflen));
		dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
		sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
				sec4_sg_bytes, GFP_DMA | flags);
		if (!edesc) {
			dev_err(jrdev,
				"could not allocate extended descriptor\n");
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;
		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
				 DESC_JOB_IO_LEN;
		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);

		if (src_nents) {
			sg_to_sec4_sg_last(req->src, src_nents,
					   edesc->sec4_sg, 0);
			src_dma = edesc->sec4_sg_dma;
			options = LDST_SGF;
		} else {
			src_dma = sg_dma_address(req->src);
			options = 0;
		}

		if (*next_buflen)
			sg_copy_part(next_buf, req->src, to_hash, req->nbytes);

		sh_len = desc_len(sh_desc);
		desc = edesc->hw_desc;
		init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
				     HDR_REVERSE);

		append_seq_in_ptr(desc, src_dma, to_hash, options);

		map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);

#ifdef DEBUG
		print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
			       DUMP_PREFIX_ADDRESS, 16, 4, desc,
			       desc_bytes(desc), 1);
#endif

		ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst,
				      req);
		if (!ret) {
			ret = -EINPROGRESS;
			state->update = ahash_update_ctx;
			state->finup = ahash_finup_ctx;
			state->final = ahash_final_ctx;
		} else {
			ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len,
					DMA_TO_DEVICE);
			kfree(edesc);
		}
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		sg_copy(next_buf, req->src, req->nbytes);
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
		       *next_buflen, 1);
#endif

	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}
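
/*
 * Request state machine: ahash_init() points update/finup/final at the
 * handlers above; a first update that actually hashes data switches
 * them to the *_ctx handlers, while one that only buffers its input
 * switches to the *_no_ctx handlers until a context exists.
 */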

static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->current_buf = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx(req);

	return state->final(req);
}

static int ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(struct caam_hash_ctx));
	memcpy(out + sizeof(struct caam_hash_ctx), state,
	       sizeof(struct caam_hash_state));
	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(struct caam_hash_ctx));
	memcpy(state, in + sizeof(struct caam_hash_ctx),
	       sizeof(struct caam_hash_state));
	return 0;
}

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
	u32 alg_op;
};

/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
};
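
/*
 * Illustrative sketch (not part of the driver): how a kernel client
 * might exercise one of the algorithms above through the generic ahash
 * API. key/keylen, data/datalen, my_done_cb and my_ctx are placeholder
 * names, and error handling is omitted:
 *
 *	struct crypto_ahash *tfm;
 *	struct ahash_request *req;
 *	struct scatterlist sg;
 *	u8 digest[SHA256_DIGEST_SIZE];
 *
 *	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	crypto_ahash_setkey(tfm, key, keylen);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, data, datalen);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done_cb, my_ctx);
 *	ahash_request_set_crypt(req, &sg, digest, datalen);
 *	err = crypto_ahash_digest(req);
 *
 * crypto_ahash_digest() typically returns -EINPROGRESS; my_done_cb()
 * runs once the job ring completes, after which the request and tfm
 * are released with ahash_request_free() and crypto_free_ahash().
 */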

struct caam_hash_alg {
	struct list_head entry;
	struct device *ctrldev;
	int alg_type;
	int alg_op;
	struct ahash_alg ahash_alg;
};

static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct caam_drv_private *priv = dev_get_drvdata(caam_hash->ctrldev);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	int tgt_jr = atomic_inc_return(&priv->tfm_count);
	int ret = 0;

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = priv->jrdev[tgt_jr % priv->total_jobrs];

	/* copy descriptor header template value */
	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;

	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct caam_hash_state));

	ret = ahash_set_sh_desc(ahash);

	return ret;
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_update_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
				 desc_bytes(ctx->sh_desc_update),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_update_first_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
				 desc_bytes(ctx->sh_desc_update_first),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_fin_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
				 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
	if (ctx->sh_desc_digest_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
				 desc_bytes(ctx->sh_desc_digest),
				 DMA_TO_DEVICE);
	if (ctx->sh_desc_finup_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
				 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
}

static void __exit caam_algapi_hash_exit(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	struct caam_hash_alg *t_alg, *n;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node)
		return;

	pdev = of_find_device_by_node(dev_node);
	if (!pdev)
		return;

	ctrldev = &pdev->dev;
	of_node_put(dev_node);
	priv = dev_get_drvdata(ctrldev);

	if (!priv->hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &priv->hash_list, entry) {
		crypto_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_hash_alg *
caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
	if (!t_alg) {
		dev_err(ctrldev, "failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
	alg->cra_type = &crypto_ahash_type;

	t_alg->alg_type = template->alg_type;
	t_alg->alg_op = template->alg_op;
	t_alg->ctrldev = ctrldev;

	return t_alg;
}

static int __init caam_algapi_hash_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node)
		return -ENODEV;

	pdev = of_find_device_by_node(dev_node);
	if (!pdev)
		return -ENODEV;

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	INIT_LIST_HEAD(&priv->hash_list);

	atomic_set(&priv->tfm_count, -1);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_hash_alg *t_alg;

		/* register hmac version */
		t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(ctrldev, "%s alg allocation failed\n",
				 driver_hash[i].driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(ctrldev, "%s alg registration failed\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &priv->hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(ctrldev, "%s alg allocation failed\n",
				 driver_hash[i].driver_name);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(ctrldev, "%s alg registration failed\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &priv->hash_list);
	}

	return err;
}

module_init(caam_algapi_hash_init);
module_exit(caam_algapi_hash_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");