/* Glue code for SHA256 hashing optimized for sparc64 crypto opcodes.
 *
 * This is based largely upon crypto/sha256_generic.c
 *
 * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
 * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>

#include <asm/pstate.h>
#include <asm/elf.h>

#include "opcodes.h"

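/* Assembly routine implementing the SHA-256 compression function with the
 * sparc64 sha256 opcode; "rounds" is the number of consecutive 64-byte
 * blocks to process from "data".
 */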
asmlinkage void sha256_sparc64_transform(u32 *digest, const char *data,
					 unsigned int rounds);

static int sha224_sparc64_init(struct shash_desc *desc)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	sctx->state[0] = SHA224_H0;
	sctx->state[1] = SHA224_H1;
	sctx->state[2] = SHA224_H2;
	sctx->state[3] = SHA224_H3;
	sctx->state[4] = SHA224_H4;
	sctx->state[5] = SHA224_H5;
	sctx->state[6] = SHA224_H6;
	sctx->state[7] = SHA224_H7;
	sctx->count = 0;

	return 0;
}

static int sha256_sparc64_init(struct shash_desc *desc)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	sctx->state[0] = SHA256_H0;
	sctx->state[1] = SHA256_H1;
	sctx->state[2] = SHA256_H2;
	sctx->state[3] = SHA256_H3;
	sctx->state[4] = SHA256_H4;
	sctx->state[5] = SHA256_H5;
	sctx->state[6] = SHA256_H6;
	sctx->state[7] = SHA256_H7;
	sctx->count = 0;

	return 0;
}

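/* Core update helper: finish any partially filled block left over from a
 * previous call, hand all remaining whole blocks to the asm transform in
 * one shot, then stash the tail in sctx->buf for next time.
 */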
static void __sha256_sparc64_update(struct sha256_state *sctx, const u8 *data,
				    unsigned int len, unsigned int partial)
{
	unsigned int done = 0;

	sctx->count += len;
	if (partial) {
		done = SHA256_BLOCK_SIZE - partial;
		memcpy(sctx->buf + partial, data, done);
		sha256_sparc64_transform(sctx->state, sctx->buf, 1);
	}
	if (len - done >= SHA256_BLOCK_SIZE) {
		const unsigned int rounds = (len - done) / SHA256_BLOCK_SIZE;

		sha256_sparc64_transform(sctx->state, data + done, rounds);
		done += rounds * SHA256_BLOCK_SIZE;
	}

	memcpy(sctx->buf, data + done, len - done);
}

static int sha256_sparc64_update(struct shash_desc *desc, const u8 *data,
				 unsigned int len)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;

	/* Handle the fast case right here */
	if (partial + len < SHA256_BLOCK_SIZE) {
		sctx->count += len;
		memcpy(sctx->buf + partial, data, len);
	} else
		__sha256_sparc64_update(sctx, data, len, partial);

	return 0;
}

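/* Standard SHA-256 finalization: append a 0x80 byte, zero-pad to 56 mod 64,
 * append the 64-bit message length in bits, then store the state words to
 * the digest in big-endian order.
 */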
static int sha256_sparc64_final(struct shash_desc *desc, u8 *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	unsigned int i, index, padlen;
	__be32 *dst = (__be32 *)out;
	__be64 bits;
	static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, };

	bits = cpu_to_be64(sctx->count << 3);

	/* Pad out to 56 mod 64 and append length */
	index = sctx->count % SHA256_BLOCK_SIZE;
	padlen = (index < 56) ? (56 - index) : ((SHA256_BLOCK_SIZE+56) - index);

	/* We need to fill a whole block for __sha256_sparc64_update() */
	if (padlen <= 56) {
		sctx->count += padlen;
		memcpy(sctx->buf + index, padding, padlen);
	} else {
		__sha256_sparc64_update(sctx, padding, padlen, index);
	}
	__sha256_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 56);

	/* Store state in digest */
	for (i = 0; i < 8; i++)
		dst[i] = cpu_to_be32(sctx->state[i]);

	/* Wipe context */
	memset(sctx, 0, sizeof(*sctx));

	return 0;
}

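/* SHA-224 reuses the SHA-256 machinery: run the full SHA-256 finalization
 * into a scratch buffer and copy out only the first SHA224_DIGEST_SIZE bytes.
 */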
static int sha224_sparc64_final(struct shash_desc *desc, u8 *hash)
{
	u8 D[SHA256_DIGEST_SIZE];

	sha256_sparc64_final(desc, D);

	memcpy(hash, D, SHA224_DIGEST_SIZE);
	memset(D, 0, SHA256_DIGEST_SIZE);

	return 0;
}

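/* Export/import copy the raw sha256_state, including the partial block
 * buffer, so an in-progress hash can be saved and resumed.
 */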
static int sha256_sparc64_export(struct shash_desc *desc, void *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, sizeof(*sctx));
	return 0;
}

static int sha256_sparc64_import(struct shash_desc *desc, const void *in)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, sizeof(*sctx));
	return 0;
}

static struct shash_alg sha256 = {
	.digestsize	=	SHA256_DIGEST_SIZE,
	.init		=	sha256_sparc64_init,
	.update		=	sha256_sparc64_update,
	.final		=	sha256_sparc64_final,
	.export		=	sha256_sparc64_export,
	.import		=	sha256_sparc64_import,
	.descsize	=	sizeof(struct sha256_state),
	.statesize	=	sizeof(struct sha256_state),
	.base		=	{
		.cra_name	=	"sha256",
		.cra_driver_name=	"sha256-sparc64",
		.cra_priority	=	SPARC_CR_OPCODE_PRIORITY,
		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize	=	SHA256_BLOCK_SIZE,
		.cra_module	=	THIS_MODULE,
	}
};

static struct shash_alg sha224 = {
	.digestsize	=	SHA224_DIGEST_SIZE,
	.init		=	sha224_sparc64_init,
	.update		=	sha256_sparc64_update,
	.final		=	sha224_sparc64_final,
	.descsize	=	sizeof(struct sha256_state),
	.base		=	{
		.cra_name	=	"sha224",
		.cra_driver_name=	"sha224-sparc64",
		.cra_priority	=	SPARC_CR_OPCODE_PRIORITY,
		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize	=	SHA224_BLOCK_SIZE,
		.cra_module	=	THIS_MODULE,
	}
};

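/* The sha256 opcode is usable only when the cpu advertises the crypto
 * hwcap and the crypto function register (%asr26) has its SHA256 bit set.
 */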
static bool __init sparc64_has_sha256_opcode(void)
{
	unsigned long cfr;

	if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
		return false;

	__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
	if (!(cfr & CFR_SHA256))
		return false;

	return true;
}

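/* Register the sha224/sha256 shash algorithms only when the opcode is
 * present; SPARC_CR_OPCODE_PRIORITY makes them preferred over the generic
 * C implementations.
 */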
static int __init sha256_sparc64_mod_init(void)
{
	if (sparc64_has_sha256_opcode()) {
		int ret = crypto_register_shash(&sha224);
		if (ret < 0)
			return ret;

		ret = crypto_register_shash(&sha256);
		if (ret < 0) {
			crypto_unregister_shash(&sha224);
			return ret;
		}

		pr_info("Using sparc64 sha256 opcode optimized SHA-256/SHA-224 implementation\n");
		return 0;
	}
	pr_info("sparc64 sha256 opcode not available.\n");
	return -ENODEV;
}

static void __exit sha256_sparc64_mod_fini(void)
{
	crypto_unregister_shash(&sha224);
	crypto_unregister_shash(&sha256);
}

module_init(sha256_sparc64_mod_init);
module_exit(sha256_sparc64_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, sparc64 sha256 opcode accelerated");

MODULE_ALIAS("sha224");
MODULE_ALIAS("sha256");

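/* Pull in the device-ID table shared by the sparc64 crypto opcode modules
 * so this module can be auto-loaded on matching cpus.
 */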
#include "crop_devid.c"