/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2006  Michal Ludvig <michal@logix.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/padlock.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <asm/i387.h>

struct padlock_sha_desc {
	struct shash_desc fallback;
};

struct padlock_sha_ctx {
	struct crypto_shash *fallback;
};
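
/*
 * Design note (from how the code below is structured): the PHE on pre-Nano
 * VIA CPUs always appends the final SHA padding itself, so incremental
 * update() calls are served by a software fallback ("sha1"/"sha256") and
 * the hardware is only used to finish the digest in finup()/final().
 * The per-tfm context holds the fallback transform; the per-request
 * context embeds its descriptor.
 */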

static int padlock_sha_init(struct shash_desc *desc)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

	dctx->fallback.tfm = ctx->fallback;
	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_init(&dctx->fallback);
}

static int padlock_sha_update(struct shash_desc *desc,
			      const u8 *data, unsigned int length)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_update(&dctx->fallback, data, length);
}

static int padlock_sha_export(struct shash_desc *desc, void *out)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

	return crypto_shash_export(&dctx->fallback, out);
}

static int padlock_sha_import(struct shash_desc *desc, const void *in)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

	dctx->fallback.tfm = ctx->fallback;
	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_import(&dctx->fallback, in);
}

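/*
 * The PHE leaves each 32-bit word of the digest state in the CPU's native
 * little-endian order, while SHA-1/SHA-256 digests are defined big-endian,
 * hence the per-word byte swap on the way out.
 */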
static inline void padlock_output_block(uint32_t *src,
					uint32_t *dst, size_t count)
{
	while (count--)
		*dst++ = swab32(*src++);
}

static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
			      unsigned int count, u8 *out)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* Note: don't reduce the buffer size below 128 bytes;
	 *       the PadLock microcode needs it that big. */
	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct sha1_state state;
	unsigned int space;
	unsigned int leftover;
	int ts_state;
	int err;

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_shash_export(&dctx->fallback, &state);
	if (err)
		goto out;

	if (state.count + count > ULONG_MAX)
		return crypto_shash_finup(&dctx->fallback, in, count, out);

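	/*
	 * The byte count handed to the hardware as "already hashed" is kept
	 * block-aligned: either the partial block is topped up through the
	 * fallback and the state re-exported (count > space), or the
	 * buffered partial block is prepended to the input and state.count
	 * rounded down (count <= space).  leftover maps a block-aligned
	 * count to a full block (1..64) rather than 0, so an already
	 * aligned state skips both paths.
	 */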
	leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
	space = SHA1_BLOCK_SIZE - leftover;
	if (space) {
		if (count > space) {
			err = crypto_shash_update(&dctx->fallback, in, space) ?:
			      crypto_shash_export(&dctx->fallback, &state);
			if (err)
				goto out;
			count -= space;
			in += space;
		} else {
			memcpy(state.buffer + leftover, in, count);
			in = state.buffer;
			count += leftover;
			state.count &= ~(SHA1_BLOCK_SIZE - 1);
		}
	}

	memcpy(result, &state.state, SHA1_DIGEST_SIZE);

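	/*
	 * REP XSHA1 (the 0xf3,0x0f,0xa6,0xc8 byte sequence): ESI = input,
	 * EDI = 16-byte-aligned state/result buffer (updated in place),
	 * EAX = bytes already hashed, ECX = total message length in bytes.
	 * With EAX != -1 the microcode appends the SHA-1 length padding
	 * itself, which is why no software padding happens here.
	 */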
	/* prevent taking the spurious DNA fault with padlock. */
	ts_state = irq_ts_save();
	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
		      :
		      : "c"((unsigned long)state.count + count),
			"a"((unsigned long)state.count),
			"S"(in), "D"(result));
	irq_ts_restore(ts_state);

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);

out:
	return err;
}

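/* final() is just finup() with no new data; the 4-byte buf is a dummy
 * that is never read. */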
static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
{
	u8 buf[4];

	return padlock_sha1_finup(desc, buf, 0, out);
}

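/*
 * SHA-256 twin of padlock_sha1_finup(): the flow is identical, only the
 * state type, block/digest sizes and opcode (REP XSHA256) differ.
 */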
static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
				unsigned int count, u8 *out)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* Note: don't reduce the buffer size below 128 bytes;
	 *       the PadLock microcode needs it that big. */
	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct sha256_state state;
	unsigned int space;
	unsigned int leftover;
	int ts_state;
	int err;

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_shash_export(&dctx->fallback, &state);
	if (err)
		goto out;

	if (state.count + count > ULONG_MAX)
		return crypto_shash_finup(&dctx->fallback, in, count, out);

	leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
	space = SHA256_BLOCK_SIZE - leftover;
	if (space) {
		if (count > space) {
			err = crypto_shash_update(&dctx->fallback, in, space) ?:
			      crypto_shash_export(&dctx->fallback, &state);
			if (err)
				goto out;
			count -= space;
			in += space;
		} else {
			memcpy(state.buf + leftover, in, count);
			in = state.buf;
			count += leftover;
			state.count &= ~(SHA256_BLOCK_SIZE - 1);
		}
	}

	memcpy(result, &state.state, SHA256_DIGEST_SIZE);

	/* prevent taking the spurious DNA fault with padlock. */
	ts_state = irq_ts_save();
	asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
		      :
		      : "c"((unsigned long)state.count + count),
			"a"((unsigned long)state.count),
			"S"(in), "D"(result));
	irq_ts_restore(ts_state);

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);

out:
	return err;
}

static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
{
	u8 buf[4];

	return padlock_sha256_finup(desc, buf, 0, out);
}

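/*
 * The size of the fallback's descriptor is only known once the fallback
 * transform has been allocated, so cra_init() grows this algorithm's
 * descsize at runtime to leave room for it behind struct padlock_sha_desc.
 */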
static int padlock_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_shash *hash = __crypto_shash_cast(tfm);
	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *fallback_tfm;
	int err = -ENOMEM;

	/* Allocate a fallback and abort if it fails. */
	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
		       fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

	ctx->fallback = fallback_tfm;
	hash->descsize += crypto_shash_descsize(fallback_tfm);
	return 0;

out:
	return err;
}

static void padlock_cra_exit(struct crypto_tfm *tfm)
{
	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->fallback);
}

static struct shash_alg sha1_alg = {
	.digestsize	=	SHA1_DIGEST_SIZE,
	.init		=	padlock_sha_init,
	.update		=	padlock_sha_update,
	.finup		=	padlock_sha1_finup,
	.final		=	padlock_sha1_final,
	.export		=	padlock_sha_export,
	.import		=	padlock_sha_import,
	.descsize	=	sizeof(struct padlock_sha_desc),
	.statesize	=	sizeof(struct sha1_state),
	.base		=	{
		.cra_name		=	"sha1",
		.cra_driver_name	=	"sha1-padlock",
		.cra_priority		=	PADLOCK_CRA_PRIORITY,
		.cra_flags		=	CRYPTO_ALG_TYPE_SHASH |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		=	SHA1_BLOCK_SIZE,
		.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
		.cra_module		=	THIS_MODULE,
		.cra_init		=	padlock_cra_init,
		.cra_exit		=	padlock_cra_exit,
	}
};

static struct shash_alg sha256_alg = {
	.digestsize	=	SHA256_DIGEST_SIZE,
	.init		=	padlock_sha_init,
	.update		=	padlock_sha_update,
	.finup		=	padlock_sha256_finup,
	.final		=	padlock_sha256_final,
	.export		=	padlock_sha_export,
	.import		=	padlock_sha_import,
	.descsize	=	sizeof(struct padlock_sha_desc),
	.statesize	=	sizeof(struct sha256_state),
	.base		=	{
		.cra_name		=	"sha256",
		.cra_driver_name	=	"sha256-padlock",
		.cra_priority		=	PADLOCK_CRA_PRIORITY,
		.cra_flags		=	CRYPTO_ALG_TYPE_SHASH |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		=	SHA256_BLOCK_SIZE,
		.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
		.cra_module		=	THIS_MODULE,
		.cra_init		=	padlock_cra_init,
		.cra_exit		=	padlock_cra_exit,
	}
};

/* Two additional shash_alg instances for the hardware-implemented
 * multi-part hashing supported by the VIA Nano processor. */
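/*
 * The Nano's PHE adds a partial-hash mode: with EAX loaded with -1,
 * XSHA1/XSHA256 consume exactly ECX whole 64-byte blocks and do not
 * append the length padding, which is what makes a true incremental
 * update() possible; padding is applied in software at final time.
 */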
static int padlock_sha1_init_nano(struct shash_desc *desc)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	*sctx = (struct sha1_state){
		.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
	};

	return 0;
}

static int padlock_sha1_update_nano(struct shash_desc *desc,
				    const u8 *data, unsigned int len)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int partial, done;
	const u8 *src;
	/* The PHE requires the output buffer to be 128 bytes long and
	 * 16-byte aligned. */
	u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	int ts_state;

	partial = sctx->count & 0x3f;
	sctx->count += len;
	done = 0;
	src = data;
	memcpy(dst, (u8 *)(sctx->state), SHA1_DIGEST_SIZE);

	if ((partial + len) >= SHA1_BLOCK_SIZE) {

		/* Append bytes from the state's buffer to complete one block */
		if (partial) {
			done = -partial;
			memcpy(sctx->buffer + partial, data,
				done + SHA1_BLOCK_SIZE);
			src = sctx->buffer;
			ts_state = irq_ts_save();
			asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
			: "+S"(src), "+D"(dst)
			: "a"((long)-1), "c"((unsigned long)1));
			irq_ts_restore(ts_state);
			done += SHA1_BLOCK_SIZE;
			src = data + done;
		}

		/* Process the remaining whole blocks of input data */
		if (len - done >= SHA1_BLOCK_SIZE) {
			ts_state = irq_ts_save();
			asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
			: "+S"(src), "+D"(dst)
			: "a"((long)-1),
			"c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE)));
			irq_ts_restore(ts_state);
			done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE);
			src = data + done;
		}
		partial = 0;
	}
	memcpy((u8 *)(sctx->state), dst, SHA1_DIGEST_SIZE);
	memcpy(sctx->buffer + partial, src, len - done);

	return 0;
}

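/*
 * Software finalization for the partial-hash mode: pad to 56 mod 64 with
 * 0x80 followed by zeros, append the 64-bit big-endian bit count, push
 * both through update, then byte-swap the digest left in state->state.
 */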
static int padlock_sha1_final_nano(struct shash_desc *desc, u8 *out)
{
	struct sha1_state *state = (struct sha1_state *)shash_desc_ctx(desc);
	unsigned int partial, padlen;
	__be64 bits;
	static const u8 padding[64] = { 0x80, };

	bits = cpu_to_be64(state->count << 3);

	/* Pad out to 56 mod 64 */
	partial = state->count & 0x3f;
	padlen = (partial < 56) ? (56 - partial) : ((64 + 56) - partial);
	padlock_sha1_update_nano(desc, padding, padlen);

	/* Append the length field */
	padlock_sha1_update_nano(desc, (const u8 *)&bits, sizeof(bits));

	/* Swap to output byte order */
	padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 5);

	return 0;
}

static int padlock_sha256_init_nano(struct shash_desc *desc)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	*sctx = (struct sha256_state){
		.state = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
			   SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 },
	};

	return 0;
}

static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
				      unsigned int len)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	unsigned int partial, done;
	const u8 *src;
	/* The PHE requires the output buffer to be 128 bytes long and
	 * 16-byte aligned. */
	u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	int ts_state;

	partial = sctx->count & 0x3f;
	sctx->count += len;
	done = 0;
	src = data;
	memcpy(dst, (u8 *)(sctx->state), SHA256_DIGEST_SIZE);

	if ((partial + len) >= SHA256_BLOCK_SIZE) {

		/* Append bytes from the state's buffer to complete one block */
		if (partial) {
			done = -partial;
			memcpy(sctx->buf + partial, data,
				done + SHA256_BLOCK_SIZE);
			src = sctx->buf;
			ts_state = irq_ts_save();
			asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
			: "+S"(src), "+D"(dst)
			: "a"((long)-1), "c"((unsigned long)1));
			irq_ts_restore(ts_state);
			done += SHA256_BLOCK_SIZE;
			src = data + done;
		}

		/* Process the remaining whole blocks of input data */
		if (len - done >= SHA256_BLOCK_SIZE) {
			ts_state = irq_ts_save();
			asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
			: "+S"(src), "+D"(dst)
			: "a"((long)-1),
			"c"((unsigned long)((len - done) / SHA256_BLOCK_SIZE)));
			irq_ts_restore(ts_state);
			done += ((len - done) - (len - done) % SHA256_BLOCK_SIZE);
			src = data + done;
		}
		partial = 0;
	}
	memcpy((u8 *)(sctx->state), dst, SHA256_DIGEST_SIZE);
	memcpy(sctx->buf + partial, src, len - done);

	return 0;
}

static int padlock_sha256_final_nano(struct shash_desc *desc, u8 *out)
{
	struct sha256_state *state =
		(struct sha256_state *)shash_desc_ctx(desc);
	unsigned int partial, padlen;
	__be64 bits;
	static const u8 padding[64] = { 0x80, };

	bits = cpu_to_be64(state->count << 3);

	/* Pad out to 56 mod 64 */
	partial = state->count & 0x3f;
	padlen = (partial < 56) ? (56 - partial) : ((64 + 56) - partial);
	padlock_sha256_update_nano(desc, padding, padlen);

	/* Append the length field */
	padlock_sha256_update_nano(desc, (const u8 *)&bits, sizeof(bits));

	/* Swap to output byte order */
	padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 8);

	return 0;
}

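/*
 * The nano variants keep a plain struct sha1_state/sha256_state in the
 * descriptor, so export and import are straight copies of that state.
 */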
static int padlock_sha_export_nano(struct shash_desc *desc, void *out)
{
	int statesize = crypto_shash_statesize(desc->tfm);
	void *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, statesize);
	return 0;
}

static int padlock_sha_import_nano(struct shash_desc *desc, const void *in)
{
	int statesize = crypto_shash_statesize(desc->tfm);
	void *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, statesize);
	return 0;
}

static struct shash_alg sha1_alg_nano = {
	.digestsize	=	SHA1_DIGEST_SIZE,
	.init		=	padlock_sha1_init_nano,
	.update		=	padlock_sha1_update_nano,
	.final		=	padlock_sha1_final_nano,
	.export		=	padlock_sha_export_nano,
	.import		=	padlock_sha_import_nano,
	.descsize	=	sizeof(struct sha1_state),
	.statesize	=	sizeof(struct sha1_state),
	.base		=	{
		.cra_name		=	"sha1",
		.cra_driver_name	=	"sha1-padlock-nano",
		.cra_priority		=	PADLOCK_CRA_PRIORITY,
		.cra_flags		=	CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize		=	SHA1_BLOCK_SIZE,
		.cra_module		=	THIS_MODULE,
	}
};

static struct shash_alg sha256_alg_nano = {
	.digestsize	=	SHA256_DIGEST_SIZE,
	.init		=	padlock_sha256_init_nano,
	.update		=	padlock_sha256_update_nano,
	.final		=	padlock_sha256_final_nano,
	.export		=	padlock_sha_export_nano,
	.import		=	padlock_sha_import_nano,
	.descsize	=	sizeof(struct sha256_state),
	.statesize	=	sizeof(struct sha256_state),
	.base		=	{
		.cra_name		=	"sha256",
		.cra_driver_name	=	"sha256-padlock-nano",
		.cra_priority		=	PADLOCK_CRA_PRIORITY,
		.cra_flags		=	CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize		=	SHA256_BLOCK_SIZE,
		.cra_module		=	THIS_MODULE,
	}
};

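/*
 * Illustrative usage sketch (not part of this driver): once registered,
 * these algorithms are reached by name through the generic shash API and
 * win over the plain C implementations thanks to PADLOCK_CRA_PRIORITY.
 * Roughly, with error handling omitted:
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("sha1", 0, 0);
 *	struct shash_desc *desc = kmalloc(sizeof(*desc) +
 *				crypto_shash_descsize(tfm), GFP_KERNEL);
 *	u8 digest[SHA1_DIGEST_SIZE];
 *
 *	desc->tfm = tfm;
 *	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 *	crypto_shash_digest(desc, data, len, digest);
 *	kfree(desc);
 *	crypto_free_shash(tfm);
 */
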
static int __init padlock_init(void)
{
	int rc = -ENODEV;
	struct cpuinfo_x86 *c = &cpu_data(0);
	struct shash_alg *sha1;
	struct shash_alg *sha256;

	if (!cpu_has_phe) {
		printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_phe_enabled) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	/* On a VIA Nano (model >= 0x0f) register the multi-part capable
	 * algorithms; otherwise register the classic single-shot ones. */
	if (c->x86_model < 0x0f) {
		sha1 = &sha1_alg;
		sha256 = &sha256_alg;
	} else {
		sha1 = &sha1_alg_nano;
		sha256 = &sha256_alg_nano;
	}

	rc = crypto_register_shash(sha1);
	if (rc)
		goto out;

	rc = crypto_register_shash(sha256);
	if (rc)
		goto out_unreg1;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");

	return 0;

out_unreg1:
	crypto_unregister_shash(sha1);

out:
	printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
	return rc;
}

static void __exit padlock_fini(void)
{
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (c->x86_model >= 0x0f) {
		crypto_unregister_shash(&sha1_alg_nano);
		crypto_unregister_shash(&sha256_alg_nano);
	} else {
		crypto_unregister_shash(&sha1_alg);
		crypto_unregister_shash(&sha256_alg);
	}
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("sha1-all");
MODULE_ALIAS("sha256-all");
MODULE_ALIAS("sha1-padlock");
MODULE_ALIAS("sha256-padlock");