/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2006  Michal Ludvig <michal@logix.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/cryptohash.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <asm/i387.h>
#include "padlock.h"

#define SHA1_DEFAULT_FALLBACK	"sha1-generic"
#define SHA256_DEFAULT_FALLBACK	"sha256-generic"

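/*
 * Per-transform state.  PadLock's xsha instructions hash a complete
 * message in one pass, so partial updates are buffered in ->data (one
 * page) and only hashed in the final() step.  If the message outgrows
 * the buffer, hashing is handed over to the software fallback kept in
 * ->fallback ("bypass" mode).
 */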
struct padlock_sha_ctx {
	char		*data;
	size_t		used;
	int		bypass;
	void (*f_sha_padlock)(const char *in, char *out, int count);
	struct hash_desc fallback;
};

static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm)
{
	return crypto_tfm_ctx(tfm);
}

/* We'll need an aligned address on the stack */
#define NEAREST_ALIGNED(ptr) \
	((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT))

static struct crypto_alg sha1_alg, sha256_alg;

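/*
 * Switch a transform into "bypass" mode: initialise the software
 * fallback hash and replay everything buffered so far into it.
 * Subsequent updates and the final() call then go to the fallback.
 */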
static void padlock_sha_bypass(struct crypto_tfm *tfm)
{
	if (ctx(tfm)->bypass)
		return;

	crypto_hash_init(&ctx(tfm)->fallback);
	if (ctx(tfm)->data && ctx(tfm)->used) {
		struct scatterlist sg;

		sg_init_one(&sg, ctx(tfm)->data, ctx(tfm)->used);
		crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length);
	}

	ctx(tfm)->used = 0;
	ctx(tfm)->bypass = 1;
}

static void padlock_sha_init(struct crypto_tfm *tfm)
{
	ctx(tfm)->used = 0;
	ctx(tfm)->bypass = 0;
}

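/*
 * Buffer new data for the final one-shot hash.  If the buffered
 * message would exceed the one-page buffer, hand everything over to
 * the software fallback instead.
 */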
static void padlock_sha_update(struct crypto_tfm *tfm,
			const uint8_t *data, unsigned int length)
{
	/* Our buffer is always one page. */
	if (unlikely(!ctx(tfm)->bypass &&
			(ctx(tfm)->used + length > PAGE_SIZE)))
		padlock_sha_bypass(tfm);

	if (unlikely(ctx(tfm)->bypass)) {
		struct scatterlist sg;
		sg_init_one(&sg, (uint8_t *)data, length);
		crypto_hash_update(&ctx(tfm)->fallback, &sg, length);
		return;
	}

	memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length);
	ctx(tfm)->used += length;
}

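/* Copy the digest out of the PadLock result buffer, byte-swapping each
 * 32-bit state word so the output has the big-endian byte layout that
 * the SHA-1/SHA-256 digests are defined to use. */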
static inline void padlock_output_block(uint32_t *src,
			uint32_t *dst, size_t count)
{
	while (count--)
		*dst++ = swab32(*src++);
}

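/*
 * Hash 'count' bytes at 'in' with a single "rep xsha1" and write the
 * 20-byte digest to 'out'.  The result buffer handed to the hardware
 * must be PADLOCK_ALIGNMENT-aligned and at least 128 bytes long,
 * pre-loaded with the standard SHA-1 initial state; the TS-flag
 * save/restore around the instruction avoids the spurious
 * device-not-available fault mentioned below.
 */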
static void padlock_do_sha1(const char *in, char *out, int count)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* BTW Don't reduce the buffer size below 128 Bytes!
	 *     PadLock microcode needs it that big. */
	char buf[128+16];
	char *result = NEAREST_ALIGNED(buf);
	int ts_state;

	((uint32_t *)result)[0] = SHA1_H0;
	((uint32_t *)result)[1] = SHA1_H1;
	((uint32_t *)result)[2] = SHA1_H2;
	((uint32_t *)result)[3] = SHA1_H3;
	((uint32_t *)result)[4] = SHA1_H4;

	/* prevent taking the spurious DNA fault with padlock. */
	ts_state = irq_ts_save();
	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
		      : "+S"(in), "+D"(result)
		      : "c"(count), "a"(0));
	irq_ts_restore(ts_state);

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
}

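/* SHA-256 variant of the above: same buffering and alignment rules,
 * but eight initial state words and the "rep xsha256" opcode. */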
static void padlock_do_sha256(const char *in, char *out, int count)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* BTW Don't reduce the buffer size below 128 Bytes!
	 *     PadLock microcode needs it that big. */
	char buf[128+16];
	char *result = NEAREST_ALIGNED(buf);
	int ts_state;

	((uint32_t *)result)[0] = SHA256_H0;
	((uint32_t *)result)[1] = SHA256_H1;
	((uint32_t *)result)[2] = SHA256_H2;
	((uint32_t *)result)[3] = SHA256_H3;
	((uint32_t *)result)[4] = SHA256_H4;
	((uint32_t *)result)[5] = SHA256_H5;
	((uint32_t *)result)[6] = SHA256_H6;
	((uint32_t *)result)[7] = SHA256_H7;

	/* prevent taking the spurious DNA fault with padlock. */
	ts_state = irq_ts_save();
	asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
		      : "+S"(in), "+D"(result)
		      : "c"(count), "a"(0));
	irq_ts_restore(ts_state);

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
}

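/*
 * Produce the digest.  In bypass mode the software fallback already
 * holds the whole message, so just finalise it; otherwise feed the
 * buffered message to the PadLock engine in one shot.
 */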
static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out)
{
	if (unlikely(ctx(tfm)->bypass)) {
		crypto_hash_final(&ctx(tfm)->fallback, out);
		ctx(tfm)->bypass = 0;
		return;
	}

	/* Pass the input buffer to PadLock microcode... */
	ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used);

	ctx(tfm)->used = 0;
}

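/*
 * Common transform setup: allocate the one-page buffer and a software
 * fallback hash.  The fallback is looked up by cra_name ("sha1" or
 * "sha256") with CRYPTO_ALG_NEED_FALLBACK in the mask so that this
 * driver's own algorithms are excluded from the lookup.
 */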
static int padlock_cra_init(struct crypto_tfm *tfm)
{
	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
	struct crypto_hash *fallback_tfm;

	/* For now we'll allocate one page. This
	 * could eventually be made configurable. */
	ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL);
	if (!ctx(tfm)->data)
		return -ENOMEM;

	/* Allocate a fallback and abort if it fails. */
	fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0,
					 CRYPTO_ALG_ASYNC |
					 CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
		       fallback_driver_name);
		free_page((unsigned long)(ctx(tfm)->data));
		return PTR_ERR(fallback_tfm);
	}

	ctx(tfm)->fallback.tfm = fallback_tfm;
	return 0;
}

static int padlock_sha1_cra_init(struct crypto_tfm *tfm)
{
	ctx(tfm)->f_sha_padlock = padlock_do_sha1;

	return padlock_cra_init(tfm);
}

static int padlock_sha256_cra_init(struct crypto_tfm *tfm)
{
	ctx(tfm)->f_sha_padlock = padlock_do_sha256;

	return padlock_cra_init(tfm);
}

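/* Release the page buffer and the fallback transform allocated in
 * padlock_cra_init(). */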
static void padlock_cra_exit(struct crypto_tfm *tfm)
{
	if (ctx(tfm)->data) {
		free_page((unsigned long)(ctx(tfm)->data));
		ctx(tfm)->data = NULL;
	}

	crypto_free_hash(ctx(tfm)->fallback.tfm);
	ctx(tfm)->fallback.tfm = NULL;
}

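/*
 * Algorithm descriptors.  Both register as synchronous digests under
 * the generic names "sha1"/"sha256" with an elevated priority
 * (PADLOCK_CRA_PRIORITY), and advertise CRYPTO_ALG_NEED_FALLBACK
 * because a software fallback is required for oversized messages.
 */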
static struct crypto_alg sha1_alg = {
	.cra_name		=	"sha1",
	.cra_driver_name	=	"sha1-padlock",
	.cra_priority		=	PADLOCK_CRA_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_DIGEST |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		=	SHA1_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(sha1_alg.cra_list),
	.cra_init		=	padlock_sha1_cra_init,
	.cra_exit		=	padlock_cra_exit,
	.cra_u			=	{
		.digest = {
			.dia_digestsize	=	SHA1_DIGEST_SIZE,
			.dia_init	=	padlock_sha_init,
			.dia_update	=	padlock_sha_update,
			.dia_final	=	padlock_sha_final,
		}
	}
};

static struct crypto_alg sha256_alg = {
	.cra_name		=	"sha256",
	.cra_driver_name	=	"sha256-padlock",
	.cra_priority		=	PADLOCK_CRA_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_DIGEST |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		=	SHA256_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(sha256_alg.cra_list),
	.cra_init		=	padlock_sha256_cra_init,
	.cra_exit		=	padlock_cra_exit,
	.cra_u			=	{
		.digest = {
			.dia_digestsize	=	SHA256_DIGEST_SIZE,
			.dia_init	=	padlock_sha_init,
			.dia_update	=	padlock_sha_update,
			.dia_final	=	padlock_sha_final,
		}
	}
};

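/*
 * Module init: register the algorithms only when the CPU both has the
 * PadLock Hash Engine (PHE) and has it enabled.
 */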
static int __init padlock_init(void)
{
	int rc = -ENODEV;

	if (!cpu_has_phe) {
		printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_phe_enabled) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	rc = crypto_register_alg(&sha1_alg);
	if (rc)
		goto out;

	rc = crypto_register_alg(&sha256_alg);
	if (rc)
		goto out_unreg1;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");

	return 0;

out_unreg1:
	crypto_unregister_alg(&sha1_alg);
out:
	printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
	return rc;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_alg(&sha1_alg);
	crypto_unregister_alg(&sha256_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("sha1-all");
MODULE_ALIAS("sha256-all");
MODULE_ALIAS("sha1-padlock");
MODULE_ALIAS("sha256-padlock");