| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 1 | /* | 
 | 2 |  * Support for Marvell's crypto engine which can be found on some Orion5X | 
 | 3 |  * boards. | 
 | 4 |  * | 
 | 5 |  * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc > | 
 | 6 |  * License: GPLv2 | 
 | 7 |  * | 
 | 8 |  */ | 
 | 9 | #include <crypto/aes.h> | 
 | 10 | #include <crypto/algapi.h> | 
 | 11 | #include <linux/crypto.h> | 
 | 12 | #include <linux/interrupt.h> | 
 | 13 | #include <linux/io.h> | 
 | 14 | #include <linux/kthread.h> | 
 | 15 | #include <linux/platform_device.h> | 
 | 16 | #include <linux/scatterlist.h> | 
| Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 17 | #include <linux/slab.h> | 
| Paul Gortmaker | 4bb33cc | 2011-05-27 14:41:48 -0400 | [diff] [blame] | 18 | #include <linux/module.h> | 
| Uri Simchoni | 750052d | 2010-04-08 19:34:55 +0300 | [diff] [blame] | 19 | #include <crypto/internal/hash.h> | 
 | 20 | #include <crypto/sha.h> | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 21 |  | 
 | 22 | #include "mv_cesa.h" | 
| Uri Simchoni | 750052d | 2010-04-08 19:34:55 +0300 | [diff] [blame] | 23 |  | 
 | 24 | #define MV_CESA	"MV-CESA:" | 
 | 25 | #define MAX_HW_HASH_SIZE	0xFFFF | 
 | 26 |  | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 27 | /* | 
 | 28 |  * STM: | 
 | 29 |  *   /---------------------------------------\ | 
 | 30 |  *   |					     | request complete | 
 | 31 |  *  \./					     | | 
 | 32 |  * IDLE -> new request -> BUSY -> done -> DEQUEUE | 
 | 33 |  *                         /°\               | | 
 | 34 |  *			    |		     | more scatter entries | 
 | 35 |  *			    \________________/ | 
 | 36 |  */ | 
/* Engine states, driven as shown in the STM diagram above. */
enum engine_status {
	ENGINE_IDLE,		/* no request being processed */
	ENGINE_BUSY,		/* hw is processing the current chunk */
	ENGINE_W_DEQUEUE,	/* chunk done, waiting for post-processing */
};
 | 42 |  | 
 | 43 | /** | 
 | 44 |  * struct req_progress - used for every crypt request | 
 | 45 |  * @src_sg_it:		sg iterator for src | 
 | 46 |  * @dst_sg_it:		sg iterator for dst | 
 | 47 |  * @sg_src_left:	bytes left in src to process (scatter list) | 
 | 48 |  * @src_start:		offset to add to src start position (scatter list) | 
| Uri Simchoni | 750052d | 2010-04-08 19:34:55 +0300 | [diff] [blame] | 49 |  * @crypt_len:		length of current hw crypt/hash process | 
| Uri Simchoni | 3b61a90 | 2010-04-08 19:27:33 +0300 | [diff] [blame] | 50 |  * @hw_nbytes:		total bytes to process in hw for this request | 
| Uri Simchoni | f0d03de | 2010-04-08 19:31:48 +0300 | [diff] [blame] | 51 |  * @copy_back:		whether to copy data back (crypt) or not (hash) | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 52 |  * @sg_dst_left:	bytes left dst to process in this scatter list | 
 | 53 |  * @dst_start:		offset to add to dst start position (scatter list) | 
| Uri Simchoni | 7a5f691 | 2010-04-08 19:29:16 +0300 | [diff] [blame] | 54 |  * @hw_processed_bytes:	number of bytes processed by hw (request). | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 55 |  * | 
 | 56 |  * sg helper are used to iterate over the scatterlist. Since the size of the | 
 | 57 |  * SRAM may be less than the scatter size, this struct struct is used to keep | 
 | 58 |  * track of progress within current scatterlist. | 
 | 59 |  */ | 
 | 60 | struct req_progress { | 
 | 61 | 	struct sg_mapping_iter src_sg_it; | 
 | 62 | 	struct sg_mapping_iter dst_sg_it; | 
| Uri Simchoni | a58094a | 2010-04-08 19:30:19 +0300 | [diff] [blame] | 63 | 	void (*complete) (void); | 
 | 64 | 	void (*process) (int is_first); | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 65 |  | 
 | 66 | 	/* src mostly */ | 
 | 67 | 	int sg_src_left; | 
 | 68 | 	int src_start; | 
 | 69 | 	int crypt_len; | 
| Uri Simchoni | 3b61a90 | 2010-04-08 19:27:33 +0300 | [diff] [blame] | 70 | 	int hw_nbytes; | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 71 | 	/* dst mostly */ | 
| Uri Simchoni | f0d03de | 2010-04-08 19:31:48 +0300 | [diff] [blame] | 72 | 	int copy_back; | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 73 | 	int sg_dst_left; | 
 | 74 | 	int dst_start; | 
| Uri Simchoni | 7a5f691 | 2010-04-08 19:29:16 +0300 | [diff] [blame] | 75 | 	int hw_processed_bytes; | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 76 | }; | 
 | 77 |  | 
/*
 * Driver-global state: register/SRAM mappings, the worker thread and the
 * software queue feeding the engine.
 */
struct crypto_priv {
	void __iomem *reg;		/* engine register window */
	void __iomem *sram;		/* security accelerator SRAM */
	int irq;
	struct task_struct *queue_th;	/* queue_manag() worker thread */

	/* the lock protects queue and eng_st */
	spinlock_t lock;
	struct crypto_queue queue;
	enum engine_status eng_st;
	struct crypto_async_request *cur_req;	/* request currently in hw */
	struct req_progress p;		/* progress state of cur_req */
	int max_req_size;		/* max bytes per hw pass */
	int sram_size;
	int has_sha1;			/* hw supports SHA1 */
	int has_hmac_sha1;		/* hw supports HMAC-SHA1 */
};

/* Single global instance; all driver state is reached through this. */
static struct crypto_priv *cpg;
 | 97 |  | 
/* Per-tfm AES context: encryption key plus lazily-derived decryption key. */
struct mv_ctx {
	u8 aes_enc_key[AES_KEY_LEN];
	u32 aes_dec_key[8];
	int key_len;			/* bytes: 16, 24 or 32 (see setkey) */
	u32 need_calc_aes_dkey;		/* set by setkey, cleared once derived */
};
 | 104 |  | 
/* Cipher modes implemented on the engine's AES unit. */
enum crypto_op {
	COP_AES_ECB,
	COP_AES_CBC,
};
 | 109 |  | 
/* Per-request cipher state. */
struct mv_req_ctx {
	enum crypto_op op;
	int decrypt;		/* 1 = decrypt, 0 = encrypt */
};
 | 114 |  | 
| Uri Simchoni | 750052d | 2010-04-08 19:34:55 +0300 | [diff] [blame] | 115 | enum hash_op { | 
 | 116 | 	COP_SHA1, | 
 | 117 | 	COP_HMAC_SHA1 | 
 | 118 | }; | 
 | 119 |  | 
/* Per-tfm hash context. */
struct mv_tfm_hash_ctx {
	struct crypto_shash *fallback;	/* sw hash used when hw limits hit */
	struct crypto_shash *base_hash;
	u32 ivs[2 * SHA1_DIGEST_SIZE / 4];	/* copied to SRAM_HMAC_IV_IN for HMAC ops */
	int count_add;		/* extra byte count folded into fallback state
				 * on import — presumably the HMAC key block;
				 * TODO confirm against the setkey path */
	enum hash_op op;
};
 | 127 |  | 
/* Per-request hash state, carried across update/final calls. */
struct mv_req_hash_ctx {
	u64 count;		/* total byte count (used as hw MAC total len) */
	u32 state[SHA1_DIGEST_SIZE / 4];	/* intermediate digest state */
	u8 buffer[SHA1_BLOCK_SIZE];	/* partial-block carry-over buffer */
	int first_hash;		/* marks that we don't have previous state */
	int last_chunk;		/* marks that this is the 'final' request */
	int extra_bytes;	/* unprocessed bytes in buffer */
	enum hash_op op;
	int count_add;		/* copied from the tfm context */
};
 | 138 |  | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 139 | static void compute_aes_dec_key(struct mv_ctx *ctx) | 
 | 140 | { | 
 | 141 | 	struct crypto_aes_ctx gen_aes_key; | 
 | 142 | 	int key_pos; | 
 | 143 |  | 
 | 144 | 	if (!ctx->need_calc_aes_dkey) | 
 | 145 | 		return; | 
 | 146 |  | 
 | 147 | 	crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len); | 
 | 148 |  | 
 | 149 | 	key_pos = ctx->key_len + 24; | 
 | 150 | 	memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4); | 
 | 151 | 	switch (ctx->key_len) { | 
 | 152 | 	case AES_KEYSIZE_256: | 
 | 153 | 		key_pos -= 2; | 
 | 154 | 		/* fall */ | 
 | 155 | 	case AES_KEYSIZE_192: | 
 | 156 | 		key_pos -= 2; | 
 | 157 | 		memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos], | 
 | 158 | 				4 * 4); | 
 | 159 | 		break; | 
 | 160 | 	} | 
 | 161 | 	ctx->need_calc_aes_dkey = 0; | 
 | 162 | } | 
 | 163 |  | 
 | 164 | static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key, | 
 | 165 | 		unsigned int len) | 
 | 166 | { | 
 | 167 | 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | 
 | 168 | 	struct mv_ctx *ctx = crypto_tfm_ctx(tfm); | 
 | 169 |  | 
 | 170 | 	switch (len) { | 
 | 171 | 	case AES_KEYSIZE_128: | 
 | 172 | 	case AES_KEYSIZE_192: | 
 | 173 | 	case AES_KEYSIZE_256: | 
 | 174 | 		break; | 
 | 175 | 	default: | 
 | 176 | 		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | 
 | 177 | 		return -EINVAL; | 
 | 178 | 	} | 
 | 179 | 	ctx->key_len = len; | 
 | 180 | 	ctx->need_calc_aes_dkey = 1; | 
 | 181 |  | 
 | 182 | 	memcpy(ctx->aes_enc_key, key, AES_KEY_LEN); | 
 | 183 | 	return 0; | 
 | 184 | } | 
 | 185 |  | 
| Uri Simchoni | 15d4dd3 | 2010-04-08 19:27:02 +0300 | [diff] [blame] | 186 | static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len) | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 187 | { | 
 | 188 | 	int ret; | 
| Uri Simchoni | 15d4dd3 | 2010-04-08 19:27:02 +0300 | [diff] [blame] | 189 | 	void *sbuf; | 
| Phil Sutter | 6677a77 | 2011-05-05 15:29:02 +0200 | [diff] [blame] | 190 | 	int copy_len; | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 191 |  | 
| Phil Sutter | 6677a77 | 2011-05-05 15:29:02 +0200 | [diff] [blame] | 192 | 	while (len) { | 
| Uri Simchoni | 15d4dd3 | 2010-04-08 19:27:02 +0300 | [diff] [blame] | 193 | 		if (!p->sg_src_left) { | 
 | 194 | 			ret = sg_miter_next(&p->src_sg_it); | 
 | 195 | 			BUG_ON(!ret); | 
 | 196 | 			p->sg_src_left = p->src_sg_it.length; | 
 | 197 | 			p->src_start = 0; | 
 | 198 | 		} | 
 | 199 |  | 
 | 200 | 		sbuf = p->src_sg_it.addr + p->src_start; | 
 | 201 |  | 
| Phil Sutter | 6677a77 | 2011-05-05 15:29:02 +0200 | [diff] [blame] | 202 | 		copy_len = min(p->sg_src_left, len); | 
 | 203 | 		memcpy(dbuf, sbuf, copy_len); | 
 | 204 |  | 
 | 205 | 		p->src_start += copy_len; | 
 | 206 | 		p->sg_src_left -= copy_len; | 
 | 207 |  | 
 | 208 | 		len -= copy_len; | 
 | 209 | 		dbuf += copy_len; | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 210 | 	} | 
| Uri Simchoni | 15d4dd3 | 2010-04-08 19:27:02 +0300 | [diff] [blame] | 211 | } | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 212 |  | 
| Uri Simchoni | 3b61a90 | 2010-04-08 19:27:33 +0300 | [diff] [blame] | 213 | static void setup_data_in(void) | 
| Uri Simchoni | 15d4dd3 | 2010-04-08 19:27:02 +0300 | [diff] [blame] | 214 | { | 
 | 215 | 	struct req_progress *p = &cpg->p; | 
| Uri Simchoni | 0c5c6c4 | 2010-04-08 19:33:26 +0300 | [diff] [blame] | 216 | 	int data_in_sram = | 
| Uri Simchoni | 7a5f691 | 2010-04-08 19:29:16 +0300 | [diff] [blame] | 217 | 	    min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size); | 
| Uri Simchoni | 0c5c6c4 | 2010-04-08 19:33:26 +0300 | [diff] [blame] | 218 | 	copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len, | 
 | 219 | 			data_in_sram - p->crypt_len); | 
 | 220 | 	p->crypt_len = data_in_sram; | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 221 | } | 
 | 222 |  | 
/*
 * Program the engine for the next chunk of the current cipher request
 * and start it.
 *
 * Builds a sec_accel_config describing mode (ECB/CBC), direction, key
 * length and SRAM layout, stages the key (and IV on the first CBC block)
 * plus the next slice of input data into SRAM, then kicks the engine.
 * Completion is signalled by interrupt.
 */
static void mv_process_current_q(int first_block)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct sec_accel_config op;

	switch (req_ctx->op) {
	case COP_AES_ECB:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
		break;
	case COP_AES_CBC:
	default:
		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
		op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
			ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
		/* The IV is only loaded once; later chunks chain in hw. */
		if (first_block)
			memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
		break;
	}
	if (req_ctx->decrypt) {
		op.config |= CFG_DIR_DEC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
				AES_KEY_LEN);
	} else {
		op.config |= CFG_DIR_ENC;
		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
				AES_KEY_LEN);
	}

	switch (ctx->key_len) {
	case AES_KEYSIZE_128:
		op.config |= CFG_AES_LEN_128;
		break;
	case AES_KEYSIZE_192:
		op.config |= CFG_AES_LEN_192;
		break;
	case AES_KEYSIZE_256:
		op.config |= CFG_AES_LEN_256;
		break;
	}
	op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
		ENC_P_DST(SRAM_DATA_OUT_START);
	op.enc_key_p = SRAM_DATA_KEY_P;

	/* Stage the next input slice; sets cpg->p.crypt_len. */
	setup_data_in();
	op.enc_len = cpg->p.crypt_len;
	memcpy(cpg->sram + SRAM_CONFIG, &op,
			sizeof(struct sec_accel_config));

	/* GO */
	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);

	/*
	 * XXX: add timer if the interrupt does not occur for some mystery
	 * reason
	 */
}
 | 281 |  | 
 | 282 | static void mv_crypto_algo_completion(void) | 
 | 283 | { | 
| Uri Simchoni | 3b61a90 | 2010-04-08 19:27:33 +0300 | [diff] [blame] | 284 | 	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req); | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 285 | 	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | 
 | 286 |  | 
| Uri Simchoni | a58094a | 2010-04-08 19:30:19 +0300 | [diff] [blame] | 287 | 	sg_miter_stop(&cpg->p.src_sg_it); | 
 | 288 | 	sg_miter_stop(&cpg->p.dst_sg_it); | 
 | 289 |  | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 290 | 	if (req_ctx->op != COP_AES_CBC) | 
 | 291 | 		return ; | 
 | 292 |  | 
 | 293 | 	memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16); | 
 | 294 | } | 
 | 295 |  | 
| Uri Simchoni | 750052d | 2010-04-08 19:34:55 +0300 | [diff] [blame] | 296 | static void mv_process_hash_current(int first_block) | 
 | 297 | { | 
 | 298 | 	struct ahash_request *req = ahash_request_cast(cpg->cur_req); | 
| Phil Sutter | cc8d350 | 2011-05-05 15:29:03 +0200 | [diff] [blame] | 299 | 	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); | 
| Uri Simchoni | 750052d | 2010-04-08 19:34:55 +0300 | [diff] [blame] | 300 | 	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req); | 
 | 301 | 	struct req_progress *p = &cpg->p; | 
 | 302 | 	struct sec_accel_config op = { 0 }; | 
 | 303 | 	int is_last; | 
 | 304 |  | 
 | 305 | 	switch (req_ctx->op) { | 
 | 306 | 	case COP_SHA1: | 
 | 307 | 	default: | 
 | 308 | 		op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1; | 
 | 309 | 		break; | 
 | 310 | 	case COP_HMAC_SHA1: | 
 | 311 | 		op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1; | 
| Phil Sutter | cc8d350 | 2011-05-05 15:29:03 +0200 | [diff] [blame] | 312 | 		memcpy(cpg->sram + SRAM_HMAC_IV_IN, | 
 | 313 | 				tfm_ctx->ivs, sizeof(tfm_ctx->ivs)); | 
| Uri Simchoni | 750052d | 2010-04-08 19:34:55 +0300 | [diff] [blame] | 314 | 		break; | 
 | 315 | 	} | 
 | 316 |  | 
 | 317 | 	op.mac_src_p = | 
 | 318 | 		MAC_SRC_DATA_P(SRAM_DATA_IN_START) | MAC_SRC_TOTAL_LEN((u32) | 
 | 319 | 		req_ctx-> | 
 | 320 | 		count); | 
 | 321 |  | 
 | 322 | 	setup_data_in(); | 
 | 323 |  | 
 | 324 | 	op.mac_digest = | 
 | 325 | 		MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len); | 
 | 326 | 	op.mac_iv = | 
 | 327 | 		MAC_INNER_IV_P(SRAM_HMAC_IV_IN) | | 
 | 328 | 		MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT); | 
 | 329 |  | 
 | 330 | 	is_last = req_ctx->last_chunk | 
 | 331 | 		&& (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes) | 
 | 332 | 		&& (req_ctx->count <= MAX_HW_HASH_SIZE); | 
 | 333 | 	if (req_ctx->first_hash) { | 
 | 334 | 		if (is_last) | 
 | 335 | 			op.config |= CFG_NOT_FRAG; | 
 | 336 | 		else | 
 | 337 | 			op.config |= CFG_FIRST_FRAG; | 
 | 338 |  | 
 | 339 | 		req_ctx->first_hash = 0; | 
 | 340 | 	} else { | 
 | 341 | 		if (is_last) | 
 | 342 | 			op.config |= CFG_LAST_FRAG; | 
 | 343 | 		else | 
 | 344 | 			op.config |= CFG_MID_FRAG; | 
| Phil Sutter | 8652348 | 2011-05-05 15:29:04 +0200 | [diff] [blame] | 345 |  | 
| Phil Sutter | 2742528 | 2011-11-16 18:28:01 +0100 | [diff] [blame] | 346 | 		if (first_block) { | 
 | 347 | 			writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A); | 
 | 348 | 			writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B); | 
 | 349 | 			writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C); | 
 | 350 | 			writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D); | 
 | 351 | 			writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E); | 
 | 352 | 		} | 
| Uri Simchoni | 750052d | 2010-04-08 19:34:55 +0300 | [diff] [blame] | 353 | 	} | 
 | 354 |  | 
 | 355 | 	memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config)); | 
 | 356 |  | 
| Uri Simchoni | 750052d | 2010-04-08 19:34:55 +0300 | [diff] [blame] | 357 | 	/* GO */ | 
 | 358 | 	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD); | 
 | 359 |  | 
 | 360 | 	/* | 
 | 361 | 	* XXX: add timer if the interrupt does not occur for some mystery | 
 | 362 | 	* reason | 
 | 363 | 	*/ | 
 | 364 | } | 
 | 365 |  | 
 | 366 | static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx, | 
 | 367 | 					  struct shash_desc *desc) | 
 | 368 | { | 
 | 369 | 	int i; | 
 | 370 | 	struct sha1_state shash_state; | 
 | 371 |  | 
 | 372 | 	shash_state.count = ctx->count + ctx->count_add; | 
 | 373 | 	for (i = 0; i < 5; i++) | 
 | 374 | 		shash_state.state[i] = ctx->state[i]; | 
 | 375 | 	memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer)); | 
 | 376 | 	return crypto_shash_import(desc, &shash_state); | 
 | 377 | } | 
 | 378 |  | 
/*
 * Finish a hash in software when the hw cannot (request larger than
 * MAX_HW_HASH_SIZE, or a zero-length/short final).
 *
 * If no hw pass has run yet (first_hash), hash the buffered bytes from
 * scratch; otherwise import the hw's intermediate SHA1 state into the
 * fallback and finalize from there.  Returns 0 or a crypto_shash error.
 */
static int mv_hash_final_fallback(struct ahash_request *req)
{
	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
	/* on-stack shash descriptor sized for the fallback tfm */
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(tfm_ctx->fallback)];
	} desc;
	int rc;

	desc.shash.tfm = tfm_ctx->fallback;
	desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	if (unlikely(req_ctx->first_hash)) {
		crypto_shash_init(&desc.shash);
		crypto_shash_update(&desc.shash, req_ctx->buffer,
				    req_ctx->extra_bytes);
	} else {
		/* only SHA1 for now....
		 */
		rc = mv_hash_import_sha1_ctx(req_ctx, &desc.shash);
		if (rc)
			goto out;
	}
	rc = crypto_shash_final(&desc.shash, req->result);
out:
	return rc;
}
 | 406 |  | 
/*
 * Completion hook for hash requests.
 *
 * Saves any trailing unprocessed bytes into the request's carry buffer,
 * then either delivers the final digest (from SRAM, or via the sw
 * fallback for oversized requests) or snapshots the engine's digest
 * registers so a later update can resume.
 */
static void mv_hash_algo_completion(void)
{
	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);

	if (ctx->extra_bytes)
		copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
	sg_miter_stop(&cpg->p.src_sg_it);

	if (likely(ctx->last_chunk)) {
		if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
			/* hw produced the final digest directly */
			memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
			       crypto_ahash_digestsize(crypto_ahash_reqtfm
						       (req)));
		} else
			mv_hash_final_fallback(req);
	} else {
		/* intermediate state, reloaded by mv_process_hash_current() */
		ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
		ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
		ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
		ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
		ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
	}
}
 | 431 |  | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 432 | static void dequeue_complete_req(void) | 
 | 433 | { | 
| Uri Simchoni | 3b61a90 | 2010-04-08 19:27:33 +0300 | [diff] [blame] | 434 | 	struct crypto_async_request *req = cpg->cur_req; | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 435 | 	void *buf; | 
 | 436 | 	int ret; | 
| Uri Simchoni | 7a5f691 | 2010-04-08 19:29:16 +0300 | [diff] [blame] | 437 | 	cpg->p.hw_processed_bytes += cpg->p.crypt_len; | 
| Uri Simchoni | f0d03de | 2010-04-08 19:31:48 +0300 | [diff] [blame] | 438 | 	if (cpg->p.copy_back) { | 
 | 439 | 		int need_copy_len = cpg->p.crypt_len; | 
 | 440 | 		int sram_offset = 0; | 
 | 441 | 		do { | 
 | 442 | 			int dst_copy; | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 443 |  | 
| Uri Simchoni | f0d03de | 2010-04-08 19:31:48 +0300 | [diff] [blame] | 444 | 			if (!cpg->p.sg_dst_left) { | 
 | 445 | 				ret = sg_miter_next(&cpg->p.dst_sg_it); | 
 | 446 | 				BUG_ON(!ret); | 
 | 447 | 				cpg->p.sg_dst_left = cpg->p.dst_sg_it.length; | 
 | 448 | 				cpg->p.dst_start = 0; | 
 | 449 | 			} | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 450 |  | 
| Uri Simchoni | f0d03de | 2010-04-08 19:31:48 +0300 | [diff] [blame] | 451 | 			buf = cpg->p.dst_sg_it.addr; | 
 | 452 | 			buf += cpg->p.dst_start; | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 453 |  | 
| Uri Simchoni | f0d03de | 2010-04-08 19:31:48 +0300 | [diff] [blame] | 454 | 			dst_copy = min(need_copy_len, cpg->p.sg_dst_left); | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 455 |  | 
| Uri Simchoni | f0d03de | 2010-04-08 19:31:48 +0300 | [diff] [blame] | 456 | 			memcpy(buf, | 
 | 457 | 			       cpg->sram + SRAM_DATA_OUT_START + sram_offset, | 
 | 458 | 			       dst_copy); | 
 | 459 | 			sram_offset += dst_copy; | 
 | 460 | 			cpg->p.sg_dst_left -= dst_copy; | 
 | 461 | 			need_copy_len -= dst_copy; | 
 | 462 | 			cpg->p.dst_start += dst_copy; | 
 | 463 | 		} while (need_copy_len > 0); | 
 | 464 | 	} | 
 | 465 |  | 
| Uri Simchoni | 0c5c6c4 | 2010-04-08 19:33:26 +0300 | [diff] [blame] | 466 | 	cpg->p.crypt_len = 0; | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 467 |  | 
 | 468 | 	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE); | 
| Uri Simchoni | 7a5f691 | 2010-04-08 19:29:16 +0300 | [diff] [blame] | 469 | 	if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) { | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 470 | 		/* process next scatter list entry */ | 
 | 471 | 		cpg->eng_st = ENGINE_BUSY; | 
| Uri Simchoni | a58094a | 2010-04-08 19:30:19 +0300 | [diff] [blame] | 472 | 		cpg->p.process(0); | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 473 | 	} else { | 
| Uri Simchoni | a58094a | 2010-04-08 19:30:19 +0300 | [diff] [blame] | 474 | 		cpg->p.complete(); | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 475 | 		cpg->eng_st = ENGINE_IDLE; | 
| Uri Simchoni | 0328ac2 | 2010-04-08 19:25:37 +0300 | [diff] [blame] | 476 | 		local_bh_disable(); | 
| Uri Simchoni | 3b61a90 | 2010-04-08 19:27:33 +0300 | [diff] [blame] | 477 | 		req->complete(req, 0); | 
| Uri Simchoni | 0328ac2 | 2010-04-08 19:25:37 +0300 | [diff] [blame] | 478 | 		local_bh_enable(); | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 479 | 	} | 
 | 480 | } | 
 | 481 |  | 
 | 482 | static int count_sgs(struct scatterlist *sl, unsigned int total_bytes) | 
 | 483 | { | 
 | 484 | 	int i = 0; | 
| Uri Simchoni | 15d4dd3 | 2010-04-08 19:27:02 +0300 | [diff] [blame] | 485 | 	size_t cur_len; | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 486 |  | 
| Phil Sutter | 6ef8450 | 2011-05-05 15:29:06 +0200 | [diff] [blame] | 487 | 	while (sl) { | 
| Uri Simchoni | 15d4dd3 | 2010-04-08 19:27:02 +0300 | [diff] [blame] | 488 | 		cur_len = sl[i].length; | 
 | 489 | 		++i; | 
 | 490 | 		if (total_bytes > cur_len) | 
 | 491 | 			total_bytes -= cur_len; | 
 | 492 | 		else | 
 | 493 | 			break; | 
 | 494 | 	} | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 495 |  | 
 | 496 | 	return i; | 
 | 497 | } | 
 | 498 |  | 
| Uri Simchoni | 750052d | 2010-04-08 19:34:55 +0300 | [diff] [blame] | 499 | static void mv_start_new_crypt_req(struct ablkcipher_request *req) | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 500 | { | 
| Uri Simchoni | 3b61a90 | 2010-04-08 19:27:33 +0300 | [diff] [blame] | 501 | 	struct req_progress *p = &cpg->p; | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 502 | 	int num_sgs; | 
 | 503 |  | 
| Uri Simchoni | 3b61a90 | 2010-04-08 19:27:33 +0300 | [diff] [blame] | 504 | 	cpg->cur_req = &req->base; | 
 | 505 | 	memset(p, 0, sizeof(struct req_progress)); | 
 | 506 | 	p->hw_nbytes = req->nbytes; | 
| Uri Simchoni | a58094a | 2010-04-08 19:30:19 +0300 | [diff] [blame] | 507 | 	p->complete = mv_crypto_algo_completion; | 
 | 508 | 	p->process = mv_process_current_q; | 
| Uri Simchoni | f0d03de | 2010-04-08 19:31:48 +0300 | [diff] [blame] | 509 | 	p->copy_back = 1; | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 510 |  | 
 | 511 | 	num_sgs = count_sgs(req->src, req->nbytes); | 
| Uri Simchoni | 3b61a90 | 2010-04-08 19:27:33 +0300 | [diff] [blame] | 512 | 	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG); | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 513 |  | 
 | 514 | 	num_sgs = count_sgs(req->dst, req->nbytes); | 
| Uri Simchoni | 3b61a90 | 2010-04-08 19:27:33 +0300 | [diff] [blame] | 515 | 	sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG); | 
 | 516 |  | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 517 | 	mv_process_current_q(1); | 
 | 518 | } | 
 | 519 |  | 
| Uri Simchoni | 750052d | 2010-04-08 19:34:55 +0300 | [diff] [blame] | 520 | static void mv_start_new_hash_req(struct ahash_request *req) | 
 | 521 | { | 
 | 522 | 	struct req_progress *p = &cpg->p; | 
 | 523 | 	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); | 
| Uri Simchoni | 750052d | 2010-04-08 19:34:55 +0300 | [diff] [blame] | 524 | 	int num_sgs, hw_bytes, old_extra_bytes, rc; | 
 | 525 | 	cpg->cur_req = &req->base; | 
 | 526 | 	memset(p, 0, sizeof(struct req_progress)); | 
 | 527 | 	hw_bytes = req->nbytes + ctx->extra_bytes; | 
 | 528 | 	old_extra_bytes = ctx->extra_bytes; | 
 | 529 |  | 
| Uri Simchoni | 750052d | 2010-04-08 19:34:55 +0300 | [diff] [blame] | 530 | 	ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE; | 
 | 531 | 	if (ctx->extra_bytes != 0 | 
 | 532 | 	    && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE)) | 
 | 533 | 		hw_bytes -= ctx->extra_bytes; | 
 | 534 | 	else | 
 | 535 | 		ctx->extra_bytes = 0; | 
 | 536 |  | 
 | 537 | 	num_sgs = count_sgs(req->src, req->nbytes); | 
 | 538 | 	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG); | 
 | 539 |  | 
 | 540 | 	if (hw_bytes) { | 
 | 541 | 		p->hw_nbytes = hw_bytes; | 
 | 542 | 		p->complete = mv_hash_algo_completion; | 
 | 543 | 		p->process = mv_process_hash_current; | 
 | 544 |  | 
| Phil Sutter | 7759995 | 2011-05-05 15:29:05 +0200 | [diff] [blame] | 545 | 		if (unlikely(old_extra_bytes)) { | 
 | 546 | 			memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer, | 
 | 547 | 			       old_extra_bytes); | 
 | 548 | 			p->crypt_len = old_extra_bytes; | 
 | 549 | 		} | 
 | 550 |  | 
| Uri Simchoni | 750052d | 2010-04-08 19:34:55 +0300 | [diff] [blame] | 551 | 		mv_process_hash_current(1); | 
 | 552 | 	} else { | 
 | 553 | 		copy_src_to_buf(p, ctx->buffer + old_extra_bytes, | 
 | 554 | 				ctx->extra_bytes - old_extra_bytes); | 
 | 555 | 		sg_miter_stop(&p->src_sg_it); | 
 | 556 | 		if (ctx->last_chunk) | 
 | 557 | 			rc = mv_hash_final_fallback(req); | 
 | 558 | 		else | 
 | 559 | 			rc = 0; | 
 | 560 | 		cpg->eng_st = ENGINE_IDLE; | 
 | 561 | 		local_bh_disable(); | 
 | 562 | 		req->base.complete(&req->base, rc); | 
 | 563 | 		local_bh_enable(); | 
 | 564 | 	} | 
 | 565 | } | 
 | 566 |  | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 567 | static int queue_manag(void *data) | 
 | 568 | { | 
 | 569 | 	cpg->eng_st = ENGINE_IDLE; | 
 | 570 | 	do { | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 571 | 		struct crypto_async_request *async_req = NULL; | 
 | 572 | 		struct crypto_async_request *backlog; | 
 | 573 |  | 
 | 574 | 		__set_current_state(TASK_INTERRUPTIBLE); | 
 | 575 |  | 
 | 576 | 		if (cpg->eng_st == ENGINE_W_DEQUEUE) | 
 | 577 | 			dequeue_complete_req(); | 
 | 578 |  | 
 | 579 | 		spin_lock_irq(&cpg->lock); | 
 | 580 | 		if (cpg->eng_st == ENGINE_IDLE) { | 
 | 581 | 			backlog = crypto_get_backlog(&cpg->queue); | 
 | 582 | 			async_req = crypto_dequeue_request(&cpg->queue); | 
 | 583 | 			if (async_req) { | 
 | 584 | 				BUG_ON(cpg->eng_st != ENGINE_IDLE); | 
 | 585 | 				cpg->eng_st = ENGINE_BUSY; | 
 | 586 | 			} | 
 | 587 | 		} | 
 | 588 | 		spin_unlock_irq(&cpg->lock); | 
 | 589 |  | 
 | 590 | 		if (backlog) { | 
 | 591 | 			backlog->complete(backlog, -EINPROGRESS); | 
 | 592 | 			backlog = NULL; | 
 | 593 | 		} | 
 | 594 |  | 
 | 595 | 		if (async_req) { | 
| Uri Simchoni | 750052d | 2010-04-08 19:34:55 +0300 | [diff] [blame] | 596 | 			if (async_req->tfm->__crt_alg->cra_type != | 
 | 597 | 			    &crypto_ahash_type) { | 
 | 598 | 				struct ablkcipher_request *req = | 
| Phil Sutter | 042e9e7 | 2011-05-05 15:28:57 +0200 | [diff] [blame] | 599 | 				    ablkcipher_request_cast(async_req); | 
| Uri Simchoni | 750052d | 2010-04-08 19:34:55 +0300 | [diff] [blame] | 600 | 				mv_start_new_crypt_req(req); | 
 | 601 | 			} else { | 
 | 602 | 				struct ahash_request *req = | 
 | 603 | 				    ahash_request_cast(async_req); | 
 | 604 | 				mv_start_new_hash_req(req); | 
 | 605 | 			} | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 606 | 			async_req = NULL; | 
 | 607 | 		} | 
 | 608 |  | 
 | 609 | 		schedule(); | 
 | 610 |  | 
 | 611 | 	} while (!kthread_should_stop()); | 
 | 612 | 	return 0; | 
 | 613 | } | 
 | 614 |  | 
| Uri Simchoni | 3b61a90 | 2010-04-08 19:27:33 +0300 | [diff] [blame] | 615 | static int mv_handle_req(struct crypto_async_request *req) | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 616 | { | 
 | 617 | 	unsigned long flags; | 
 | 618 | 	int ret; | 
 | 619 |  | 
 | 620 | 	spin_lock_irqsave(&cpg->lock, flags); | 
| Uri Simchoni | 3b61a90 | 2010-04-08 19:27:33 +0300 | [diff] [blame] | 621 | 	ret = crypto_enqueue_request(&cpg->queue, req); | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 622 | 	spin_unlock_irqrestore(&cpg->lock, flags); | 
 | 623 | 	wake_up_process(cpg->queue_th); | 
 | 624 | 	return ret; | 
 | 625 | } | 
 | 626 |  | 
 | 627 | static int mv_enc_aes_ecb(struct ablkcipher_request *req) | 
 | 628 | { | 
 | 629 | 	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | 
 | 630 |  | 
 | 631 | 	req_ctx->op = COP_AES_ECB; | 
 | 632 | 	req_ctx->decrypt = 0; | 
 | 633 |  | 
| Uri Simchoni | 3b61a90 | 2010-04-08 19:27:33 +0300 | [diff] [blame] | 634 | 	return mv_handle_req(&req->base); | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 635 | } | 
 | 636 |  | 
 | 637 | static int mv_dec_aes_ecb(struct ablkcipher_request *req) | 
 | 638 | { | 
 | 639 | 	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | 
 | 640 | 	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | 
 | 641 |  | 
 | 642 | 	req_ctx->op = COP_AES_ECB; | 
 | 643 | 	req_ctx->decrypt = 1; | 
 | 644 |  | 
 | 645 | 	compute_aes_dec_key(ctx); | 
| Uri Simchoni | 3b61a90 | 2010-04-08 19:27:33 +0300 | [diff] [blame] | 646 | 	return mv_handle_req(&req->base); | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 647 | } | 
 | 648 |  | 
 | 649 | static int mv_enc_aes_cbc(struct ablkcipher_request *req) | 
 | 650 | { | 
 | 651 | 	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | 
 | 652 |  | 
 | 653 | 	req_ctx->op = COP_AES_CBC; | 
 | 654 | 	req_ctx->decrypt = 0; | 
 | 655 |  | 
| Uri Simchoni | 3b61a90 | 2010-04-08 19:27:33 +0300 | [diff] [blame] | 656 | 	return mv_handle_req(&req->base); | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 657 | } | 
 | 658 |  | 
 | 659 | static int mv_dec_aes_cbc(struct ablkcipher_request *req) | 
 | 660 | { | 
 | 661 | 	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | 
 | 662 | 	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); | 
 | 663 |  | 
 | 664 | 	req_ctx->op = COP_AES_CBC; | 
 | 665 | 	req_ctx->decrypt = 1; | 
 | 666 |  | 
 | 667 | 	compute_aes_dec_key(ctx); | 
| Uri Simchoni | 3b61a90 | 2010-04-08 19:27:33 +0300 | [diff] [blame] | 668 | 	return mv_handle_req(&req->base); | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 669 | } | 
 | 670 |  | 
/* tfm constructor: reserve per-request context space for cipher requests. */
static int mv_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
	return 0;
}
 | 676 |  | 
| Uri Simchoni | 750052d | 2010-04-08 19:34:55 +0300 | [diff] [blame] | 677 | static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op, | 
 | 678 | 				 int is_last, unsigned int req_len, | 
 | 679 | 				 int count_add) | 
 | 680 | { | 
 | 681 | 	memset(ctx, 0, sizeof(*ctx)); | 
 | 682 | 	ctx->op = op; | 
 | 683 | 	ctx->count = req_len; | 
 | 684 | 	ctx->first_hash = 1; | 
 | 685 | 	ctx->last_chunk = is_last; | 
 | 686 | 	ctx->count_add = count_add; | 
 | 687 | } | 
 | 688 |  | 
 | 689 | static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last, | 
 | 690 | 				   unsigned req_len) | 
 | 691 | { | 
 | 692 | 	ctx->last_chunk = is_last; | 
 | 693 | 	ctx->count += req_len; | 
 | 694 | } | 
 | 695 |  | 
 | 696 | static int mv_hash_init(struct ahash_request *req) | 
 | 697 | { | 
 | 698 | 	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); | 
 | 699 | 	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0, | 
 | 700 | 			     tfm_ctx->count_add); | 
 | 701 | 	return 0; | 
 | 702 | } | 
 | 703 |  | 
 | 704 | static int mv_hash_update(struct ahash_request *req) | 
 | 705 | { | 
 | 706 | 	if (!req->nbytes) | 
 | 707 | 		return 0; | 
 | 708 |  | 
 | 709 | 	mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes); | 
 | 710 | 	return mv_handle_req(&req->base); | 
 | 711 | } | 
 | 712 |  | 
 | 713 | static int mv_hash_final(struct ahash_request *req) | 
 | 714 | { | 
 | 715 | 	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); | 
| Phil Sutter | 6ef8450 | 2011-05-05 15:29:06 +0200 | [diff] [blame] | 716 |  | 
| Uri Simchoni | 750052d | 2010-04-08 19:34:55 +0300 | [diff] [blame] | 717 | 	mv_update_hash_req_ctx(ctx, 1, 0); | 
 | 718 | 	return mv_handle_req(&req->base); | 
 | 719 | } | 
 | 720 |  | 
 | 721 | static int mv_hash_finup(struct ahash_request *req) | 
 | 722 | { | 
| Uri Simchoni | 750052d | 2010-04-08 19:34:55 +0300 | [diff] [blame] | 723 | 	mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes); | 
 | 724 | 	return mv_handle_req(&req->base); | 
 | 725 | } | 
 | 726 |  | 
 | 727 | static int mv_hash_digest(struct ahash_request *req) | 
 | 728 | { | 
 | 729 | 	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); | 
 | 730 | 	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1, | 
 | 731 | 			     req->nbytes, tfm_ctx->count_add); | 
 | 732 | 	return mv_handle_req(&req->base); | 
 | 733 | } | 
 | 734 |  | 
 | 735 | static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate, | 
 | 736 | 			     const void *ostate) | 
 | 737 | { | 
 | 738 | 	const struct sha1_state *isha1_state = istate, *osha1_state = ostate; | 
 | 739 | 	int i; | 
 | 740 | 	for (i = 0; i < 5; i++) { | 
 | 741 | 		ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]); | 
 | 742 | 		ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]); | 
 | 743 | 	} | 
 | 744 | } | 
 | 745 |  | 
/*
 * Set the HMAC key: hash the XOR-masked ipad/opad blocks with the base
 * shash, export the two partial states and store them as engine IVs via
 * mv_hash_init_ivs().  Returns 0 on success or a negative errno from
 * the shash layer.
 */
static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 * key,
			  unsigned int keylen)
{
	int rc;
	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	int bs, ds, ss;

	/* Plain (non-HMAC) transforms have no base hash and take no key. */
	if (!ctx->base_hash)
		return 0;

	/* Keep the software fallback's key in sync. */
	rc = crypto_shash_setkey(ctx->fallback, key, keylen);
	if (rc)
		return rc;

	/* Can't see a way to extract the ipad/opad from the fallback tfm
	   so I'm basically copying code from the hmac module */
	bs = crypto_shash_blocksize(ctx->base_hash);
	ds = crypto_shash_digestsize(ctx->base_hash);
	ss = crypto_shash_statesize(ctx->base_hash);

	{
		struct {
			struct shash_desc shash;
			char ctx[crypto_shash_descsize(ctx->base_hash)];
		} desc;
		unsigned int i;
		char ipad[ss];	/* NOTE(review): VLA sized by the state size */
		char opad[ss];

		desc.shash.tfm = ctx->base_hash;
		desc.shash.flags = crypto_shash_get_flags(ctx->base_hash) &
		    CRYPTO_TFM_REQ_MAY_SLEEP;

		/* Keys longer than one block are first hashed down to ds. */
		if (keylen > bs) {
			int err;

			err =
			    crypto_shash_digest(&desc.shash, key, keylen, ipad);
			if (err)
				return err;

			keylen = ds;
		} else
			memcpy(ipad, key, keylen);

		/* Zero-pad the key and build both pads from one copy. */
		memset(ipad + keylen, 0, bs - keylen);
		memcpy(opad, ipad, bs);

		for (i = 0; i < bs; i++) {
			ipad[i] ^= 0x36;	/* HMAC inner pad constant */
			opad[i] ^= 0x5c;	/* HMAC outer pad constant */
		}

		/* Hash each pad and export the resulting partial state;
		 * the ?: chain stops at the first failing step. */
		rc = crypto_shash_init(&desc.shash) ? :
		    crypto_shash_update(&desc.shash, ipad, bs) ? :
		    crypto_shash_export(&desc.shash, ipad) ? :
		    crypto_shash_init(&desc.shash) ? :
		    crypto_shash_update(&desc.shash, opad, bs) ? :
		    crypto_shash_export(&desc.shash, opad);

		if (rc == 0)
			mv_hash_init_ivs(ctx, ipad, opad);

		return rc;
	}
}
 | 812 |  | 
 | 813 | static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name, | 
 | 814 | 			    enum hash_op op, int count_add) | 
 | 815 | { | 
 | 816 | 	const char *fallback_driver_name = tfm->__crt_alg->cra_name; | 
 | 817 | 	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm); | 
 | 818 | 	struct crypto_shash *fallback_tfm = NULL; | 
 | 819 | 	struct crypto_shash *base_hash = NULL; | 
 | 820 | 	int err = -ENOMEM; | 
 | 821 |  | 
 | 822 | 	ctx->op = op; | 
 | 823 | 	ctx->count_add = count_add; | 
 | 824 |  | 
 | 825 | 	/* Allocate a fallback and abort if it failed. */ | 
 | 826 | 	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0, | 
 | 827 | 					  CRYPTO_ALG_NEED_FALLBACK); | 
 | 828 | 	if (IS_ERR(fallback_tfm)) { | 
 | 829 | 		printk(KERN_WARNING MV_CESA | 
 | 830 | 		       "Fallback driver '%s' could not be loaded!\n", | 
 | 831 | 		       fallback_driver_name); | 
 | 832 | 		err = PTR_ERR(fallback_tfm); | 
 | 833 | 		goto out; | 
 | 834 | 	} | 
 | 835 | 	ctx->fallback = fallback_tfm; | 
 | 836 |  | 
 | 837 | 	if (base_hash_name) { | 
 | 838 | 		/* Allocate a hash to compute the ipad/opad of hmac. */ | 
 | 839 | 		base_hash = crypto_alloc_shash(base_hash_name, 0, | 
 | 840 | 					       CRYPTO_ALG_NEED_FALLBACK); | 
 | 841 | 		if (IS_ERR(base_hash)) { | 
 | 842 | 			printk(KERN_WARNING MV_CESA | 
 | 843 | 			       "Base driver '%s' could not be loaded!\n", | 
 | 844 | 			       base_hash_name); | 
| Roel Kluin | 41f2977 | 2011-01-04 15:37:16 +1100 | [diff] [blame] | 845 | 			err = PTR_ERR(base_hash); | 
| Uri Simchoni | 750052d | 2010-04-08 19:34:55 +0300 | [diff] [blame] | 846 | 			goto err_bad_base; | 
 | 847 | 		} | 
 | 848 | 	} | 
 | 849 | 	ctx->base_hash = base_hash; | 
 | 850 |  | 
 | 851 | 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | 
 | 852 | 				 sizeof(struct mv_req_hash_ctx) + | 
 | 853 | 				 crypto_shash_descsize(ctx->fallback)); | 
 | 854 | 	return 0; | 
 | 855 | err_bad_base: | 
 | 856 | 	crypto_free_shash(fallback_tfm); | 
 | 857 | out: | 
 | 858 | 	return err; | 
 | 859 | } | 
 | 860 |  | 
 | 861 | static void mv_cra_hash_exit(struct crypto_tfm *tfm) | 
 | 862 | { | 
 | 863 | 	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm); | 
 | 864 |  | 
 | 865 | 	crypto_free_shash(ctx->fallback); | 
 | 866 | 	if (ctx->base_hash) | 
 | 867 | 		crypto_free_shash(ctx->base_hash); | 
 | 868 | } | 
 | 869 |  | 
/* tfm constructor for plain SHA1: no base hash, no length offset. */
static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
{
	return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
}
 | 874 |  | 
/*
 * tfm constructor for HMAC-SHA1: use a "sha1" base hash for ipad/opad
 * derivation and account one extra block in the hashed length.
 */
static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
{
	return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
}
 | 879 |  | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 880 | irqreturn_t crypto_int(int irq, void *priv) | 
 | 881 | { | 
 | 882 | 	u32 val; | 
 | 883 |  | 
 | 884 | 	val = readl(cpg->reg + SEC_ACCEL_INT_STATUS); | 
 | 885 | 	if (!(val & SEC_INT_ACCEL0_DONE)) | 
 | 886 | 		return IRQ_NONE; | 
 | 887 |  | 
 | 888 | 	val &= ~SEC_INT_ACCEL0_DONE; | 
 | 889 | 	writel(val, cpg->reg + FPGA_INT_STATUS); | 
 | 890 | 	writel(val, cpg->reg + SEC_ACCEL_INT_STATUS); | 
 | 891 | 	BUG_ON(cpg->eng_st != ENGINE_BUSY); | 
 | 892 | 	cpg->eng_st = ENGINE_W_DEQUEUE; | 
 | 893 | 	wake_up_process(cpg->queue_th); | 
 | 894 | 	return IRQ_HANDLED; | 
 | 895 | } | 
 | 896 |  | 
/* Async AES-ECB ablkcipher backed by the CESA engine. */
struct crypto_alg mv_aes_alg_ecb = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "mv-ecb-aes",
	.cra_priority	= 300,
	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize	= 16,
	.cra_ctxsize	= sizeof(struct mv_ctx),
	.cra_alignmask	= 0,
	.cra_type	= &crypto_ablkcipher_type,
	.cra_module	= THIS_MODULE,
	.cra_init	= mv_cra_init,
	.cra_u		= {
		.ablkcipher = {
			.min_keysize	=	AES_MIN_KEY_SIZE,
			.max_keysize	=	AES_MAX_KEY_SIZE,
			.setkey		=	mv_setkey_aes,
			.encrypt	=	mv_enc_aes_ecb,
			.decrypt	=	mv_dec_aes_ecb,
		},
	},
};
 | 918 |  | 
/* Async AES-CBC ablkcipher backed by the CESA engine. */
struct crypto_alg mv_aes_alg_cbc = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "mv-cbc-aes",
	.cra_priority	= 300,
	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize	= AES_BLOCK_SIZE,
	.cra_ctxsize	= sizeof(struct mv_ctx),
	.cra_alignmask	= 0,
	.cra_type	= &crypto_ablkcipher_type,
	.cra_module	= THIS_MODULE,
	.cra_init	= mv_cra_init,
	.cra_u		= {
		.ablkcipher = {
			.ivsize		=	AES_BLOCK_SIZE,
			.min_keysize	=	AES_MIN_KEY_SIZE,
			.max_keysize	=	AES_MAX_KEY_SIZE,
			.setkey		=	mv_setkey_aes,
			.encrypt	=	mv_enc_aes_cbc,
			.decrypt	=	mv_dec_aes_cbc,
		},
	},
};
 | 941 |  | 
| Uri Simchoni | 750052d | 2010-04-08 19:34:55 +0300 | [diff] [blame] | 942 | struct ahash_alg mv_sha1_alg = { | 
 | 943 | 	.init = mv_hash_init, | 
 | 944 | 	.update = mv_hash_update, | 
 | 945 | 	.final = mv_hash_final, | 
 | 946 | 	.finup = mv_hash_finup, | 
 | 947 | 	.digest = mv_hash_digest, | 
 | 948 | 	.halg = { | 
 | 949 | 		 .digestsize = SHA1_DIGEST_SIZE, | 
 | 950 | 		 .base = { | 
 | 951 | 			  .cra_name = "sha1", | 
 | 952 | 			  .cra_driver_name = "mv-sha1", | 
 | 953 | 			  .cra_priority = 300, | 
 | 954 | 			  .cra_flags = | 
 | 955 | 			  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, | 
 | 956 | 			  .cra_blocksize = SHA1_BLOCK_SIZE, | 
 | 957 | 			  .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx), | 
 | 958 | 			  .cra_init = mv_cra_hash_sha1_init, | 
 | 959 | 			  .cra_exit = mv_cra_hash_exit, | 
 | 960 | 			  .cra_module = THIS_MODULE, | 
 | 961 | 			  } | 
 | 962 | 		 } | 
 | 963 | }; | 
 | 964 |  | 
/* Async HMAC-SHA1 ahash; keyed via mv_hash_setkey, software fallback set. */
struct ahash_alg mv_hmac_sha1_alg = {
	.init = mv_hash_init,
	.update = mv_hash_update,
	.final = mv_hash_final,
	.finup = mv_hash_finup,
	.digest = mv_hash_digest,
	.setkey = mv_hash_setkey,
	.halg = {
		 .digestsize = SHA1_DIGEST_SIZE,
		 .base = {
			  .cra_name = "hmac(sha1)",
			  .cra_driver_name = "mv-hmac-sha1",
			  .cra_priority = 300,
			  .cra_flags =
			  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
			  .cra_blocksize = SHA1_BLOCK_SIZE,
			  .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
			  .cra_init = mv_cra_hash_hmac_sha1_init,
			  .cra_exit = mv_cra_hash_exit,
			  .cra_module = THIS_MODULE,
			  }
		 }
};
 | 988 |  | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 989 | static int mv_probe(struct platform_device *pdev) | 
 | 990 | { | 
 | 991 | 	struct crypto_priv *cp; | 
 | 992 | 	struct resource *res; | 
 | 993 | 	int irq; | 
 | 994 | 	int ret; | 
 | 995 |  | 
 | 996 | 	if (cpg) { | 
| Uri Simchoni | 750052d | 2010-04-08 19:34:55 +0300 | [diff] [blame] | 997 | 		printk(KERN_ERR MV_CESA "Second crypto dev?\n"); | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 998 | 		return -EEXIST; | 
 | 999 | 	} | 
 | 1000 |  | 
 | 1001 | 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); | 
 | 1002 | 	if (!res) | 
 | 1003 | 		return -ENXIO; | 
 | 1004 |  | 
 | 1005 | 	cp = kzalloc(sizeof(*cp), GFP_KERNEL); | 
 | 1006 | 	if (!cp) | 
 | 1007 | 		return -ENOMEM; | 
 | 1008 |  | 
 | 1009 | 	spin_lock_init(&cp->lock); | 
 | 1010 | 	crypto_init_queue(&cp->queue, 50); | 
| Tobias Klauser | 5bdd5de | 2010-05-14 14:58:05 +1000 | [diff] [blame] | 1011 | 	cp->reg = ioremap(res->start, resource_size(res)); | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 1012 | 	if (!cp->reg) { | 
 | 1013 | 		ret = -ENOMEM; | 
 | 1014 | 		goto err; | 
 | 1015 | 	} | 
 | 1016 |  | 
 | 1017 | 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram"); | 
 | 1018 | 	if (!res) { | 
 | 1019 | 		ret = -ENXIO; | 
 | 1020 | 		goto err_unmap_reg; | 
 | 1021 | 	} | 
| Tobias Klauser | 5bdd5de | 2010-05-14 14:58:05 +1000 | [diff] [blame] | 1022 | 	cp->sram_size = resource_size(res); | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 1023 | 	cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE; | 
 | 1024 | 	cp->sram = ioremap(res->start, cp->sram_size); | 
 | 1025 | 	if (!cp->sram) { | 
 | 1026 | 		ret = -ENOMEM; | 
 | 1027 | 		goto err_unmap_reg; | 
 | 1028 | 	} | 
 | 1029 |  | 
 | 1030 | 	irq = platform_get_irq(pdev, 0); | 
 | 1031 | 	if (irq < 0 || irq == NO_IRQ) { | 
 | 1032 | 		ret = irq; | 
 | 1033 | 		goto err_unmap_sram; | 
 | 1034 | 	} | 
 | 1035 | 	cp->irq = irq; | 
 | 1036 |  | 
 | 1037 | 	platform_set_drvdata(pdev, cp); | 
 | 1038 | 	cpg = cp; | 
 | 1039 |  | 
 | 1040 | 	cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto"); | 
 | 1041 | 	if (IS_ERR(cp->queue_th)) { | 
 | 1042 | 		ret = PTR_ERR(cp->queue_th); | 
| Dan Carpenter | 7cc2835 | 2010-05-26 10:45:22 +1000 | [diff] [blame] | 1043 | 		goto err_unmap_sram; | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 1044 | 	} | 
 | 1045 |  | 
 | 1046 | 	ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev), | 
 | 1047 | 			cp); | 
 | 1048 | 	if (ret) | 
| Dan Carpenter | 7cc2835 | 2010-05-26 10:45:22 +1000 | [diff] [blame] | 1049 | 		goto err_thread; | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 1050 |  | 
 | 1051 | 	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK); | 
 | 1052 | 	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG); | 
| Phil Sutter | 99db3ea | 2011-05-05 15:28:58 +0200 | [diff] [blame] | 1053 | 	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0); | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 1054 |  | 
 | 1055 | 	ret = crypto_register_alg(&mv_aes_alg_ecb); | 
| Phil Sutter | 2a025f5 | 2011-05-05 15:29:00 +0200 | [diff] [blame] | 1056 | 	if (ret) { | 
 | 1057 | 		printk(KERN_WARNING MV_CESA | 
 | 1058 | 		       "Could not register aes-ecb driver\n"); | 
| Dan Carpenter | 7cc2835 | 2010-05-26 10:45:22 +1000 | [diff] [blame] | 1059 | 		goto err_irq; | 
| Phil Sutter | 2a025f5 | 2011-05-05 15:29:00 +0200 | [diff] [blame] | 1060 | 	} | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 1061 |  | 
 | 1062 | 	ret = crypto_register_alg(&mv_aes_alg_cbc); | 
| Phil Sutter | 2a025f5 | 2011-05-05 15:29:00 +0200 | [diff] [blame] | 1063 | 	if (ret) { | 
 | 1064 | 		printk(KERN_WARNING MV_CESA | 
 | 1065 | 		       "Could not register aes-cbc driver\n"); | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 1066 | 		goto err_unreg_ecb; | 
| Phil Sutter | 2a025f5 | 2011-05-05 15:29:00 +0200 | [diff] [blame] | 1067 | 	} | 
| Uri Simchoni | 750052d | 2010-04-08 19:34:55 +0300 | [diff] [blame] | 1068 |  | 
 | 1069 | 	ret = crypto_register_ahash(&mv_sha1_alg); | 
 | 1070 | 	if (ret == 0) | 
 | 1071 | 		cpg->has_sha1 = 1; | 
 | 1072 | 	else | 
 | 1073 | 		printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n"); | 
 | 1074 |  | 
 | 1075 | 	ret = crypto_register_ahash(&mv_hmac_sha1_alg); | 
 | 1076 | 	if (ret == 0) { | 
 | 1077 | 		cpg->has_hmac_sha1 = 1; | 
 | 1078 | 	} else { | 
 | 1079 | 		printk(KERN_WARNING MV_CESA | 
 | 1080 | 		       "Could not register hmac-sha1 driver\n"); | 
 | 1081 | 	} | 
 | 1082 |  | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 1083 | 	return 0; | 
 | 1084 | err_unreg_ecb: | 
 | 1085 | 	crypto_unregister_alg(&mv_aes_alg_ecb); | 
| Dan Carpenter | 7cc2835 | 2010-05-26 10:45:22 +1000 | [diff] [blame] | 1086 | err_irq: | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 1087 | 	free_irq(irq, cp); | 
| Dan Carpenter | 7cc2835 | 2010-05-26 10:45:22 +1000 | [diff] [blame] | 1088 | err_thread: | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 1089 | 	kthread_stop(cp->queue_th); | 
 | 1090 | err_unmap_sram: | 
 | 1091 | 	iounmap(cp->sram); | 
 | 1092 | err_unmap_reg: | 
 | 1093 | 	iounmap(cp->reg); | 
 | 1094 | err: | 
 | 1095 | 	kfree(cp); | 
 | 1096 | 	cpg = NULL; | 
 | 1097 | 	platform_set_drvdata(pdev, NULL); | 
 | 1098 | 	return ret; | 
 | 1099 | } | 
 | 1100 |  | 
 | 1101 | static int mv_remove(struct platform_device *pdev) | 
 | 1102 | { | 
 | 1103 | 	struct crypto_priv *cp = platform_get_drvdata(pdev); | 
 | 1104 |  | 
 | 1105 | 	crypto_unregister_alg(&mv_aes_alg_ecb); | 
 | 1106 | 	crypto_unregister_alg(&mv_aes_alg_cbc); | 
| Uri Simchoni | 750052d | 2010-04-08 19:34:55 +0300 | [diff] [blame] | 1107 | 	if (cp->has_sha1) | 
 | 1108 | 		crypto_unregister_ahash(&mv_sha1_alg); | 
 | 1109 | 	if (cp->has_hmac_sha1) | 
 | 1110 | 		crypto_unregister_ahash(&mv_hmac_sha1_alg); | 
| Sebastian Andrzej Siewior | 85a7f0ac | 2009-08-10 12:50:03 +1000 | [diff] [blame] | 1111 | 	kthread_stop(cp->queue_th); | 
 | 1112 | 	free_irq(cp->irq, cp); | 
 | 1113 | 	memset(cp->sram, 0, cp->sram_size); | 
 | 1114 | 	iounmap(cp->sram); | 
 | 1115 | 	iounmap(cp->reg); | 
 | 1116 | 	kfree(cp); | 
 | 1117 | 	cpg = NULL; | 
 | 1118 | 	return 0; | 
 | 1119 | } | 
 | 1120 |  | 
/* Platform driver glue; binds to devices named "mv_crypto". */
static struct platform_driver marvell_crypto = {
	.probe		= mv_probe,
	.remove		= mv_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "mv_crypto",
	},
};
MODULE_ALIAS("platform:mv_crypto");
 | 1130 |  | 
/* Module entry point: register the platform driver. */
static int __init mv_crypto_init(void)
{
	return platform_driver_register(&marvell_crypto);
}
module_init(mv_crypto_init);
 | 1136 |  | 
/* Module exit point: unregister the platform driver. */
static void __exit mv_crypto_exit(void)
{
	platform_driver_unregister(&marvell_crypto);
}
module_exit(mv_crypto_exit);
 | 1142 |  | 
 | 1143 | MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>"); | 
 | 1144 | MODULE_DESCRIPTION("Support for Marvell's cryptographic engine"); | 
 | 1145 | MODULE_LICENSE("GPL"); |