/*
 * Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <crypto/internal/skcipher.h>
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/rtnetlink.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timer.h>

#include "picoxcell_crypto_regs.h"

/*
 * The threshold for the number of entries in the CMD FIFO available before
 * the CMD0_CNT interrupt is raised. Increasing this value will reduce the
 * number of interrupts raised to the CPU.
 */
#define CMD0_IRQ_THRESHOLD   1

/*
 * The timeout period (in jiffies) for a PDU. When the number of PDUs in
 * flight is greater than the STAT_IRQ_THRESHOLD or is zero, the timer is
 * disabled. When there are packets in flight but fewer than the threshold,
 * we enable the timer and, at expiry, attempt to remove any processed
 * packets from the queue. If there are still packets left, we schedule the
 * timer again.
 */
#define PACKET_TIMEOUT	    1

/* The priority to register each algorithm with. */
#define SPACC_CRYPTO_ALG_PRIORITY	10000

#define SPACC_CRYPTO_KASUMI_F8_KEY_LEN	16
#define SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ 64
#define SPACC_CRYPTO_IPSEC_HASH_PG_SZ	64
#define SPACC_CRYPTO_IPSEC_MAX_CTXS	32
#define SPACC_CRYPTO_IPSEC_FIFO_SZ	32
#define SPACC_CRYPTO_L2_CIPHER_PG_SZ	64
#define SPACC_CRYPTO_L2_HASH_PG_SZ	64
#define SPACC_CRYPTO_L2_MAX_CTXS	128
#define SPACC_CRYPTO_L2_FIFO_SZ		128

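/* Maximum number of DDT entries, including the terminating (0, 0) entry. */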
#define MAX_DDT_LEN			16

/* DDT format. This must match the hardware DDT format exactly. */
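/* Each entry is a (bus address, length) pair; a (0, 0) entry ends the list. */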
struct spacc_ddt {
	dma_addr_t	p;
	u32		len;
};

/*
 * Asynchronous crypto request structure.
 *
 * This structure defines a request that is either queued for processing or
 * being processed.
 */
struct spacc_req {
	struct list_head		list;
	struct spacc_engine		*engine;
	struct crypto_async_request	*req;
	int				result;
	bool				is_encrypt;
	unsigned			ctx_id;
	dma_addr_t			src_addr, dst_addr;
	struct spacc_ddt		*src_ddt, *dst_ddt;
	void				(*complete)(struct spacc_req *req);

	/* AEAD specific bits. */
	u8				*giv;
	size_t				giv_len;
	dma_addr_t			giv_pa;
};

struct spacc_engine {
	void __iomem			*regs;
	struct list_head		pending;
	int				next_ctx;
	spinlock_t			hw_lock;
	int				in_flight;
	struct list_head		completed;
	struct list_head		in_progress;
	struct tasklet_struct		complete;
	unsigned long			fifo_sz;
	void __iomem			*cipher_ctx_base;
	void __iomem			*hash_key_base;
	struct spacc_alg		*algs;
	unsigned			num_algs;
	struct list_head		registered_algs;
	size_t				cipher_pg_sz;
	size_t				hash_pg_sz;
	const char			*name;
	struct clk			*clk;
	struct device			*dev;
	unsigned			max_ctxs;
	struct timer_list		packet_timeout;
	unsigned			stat_irq_thresh;
	struct dma_pool			*req_pool;
};

/* Algorithm type mask. */
#define SPACC_CRYPTO_ALG_MASK		0x7

/* SPACC definition of a crypto algorithm. */
struct spacc_alg {
	unsigned long			ctrl_default;
	unsigned long			type;
	struct crypto_alg		alg;
	struct spacc_engine		*engine;
	struct list_head		entry;
	int				key_offs;
	int				iv_offs;
};

/* Generic context structure for any algorithm type. */
struct spacc_generic_ctx {
	struct spacc_engine		*engine;
	int				flags;
	int				key_offs;
	int				iv_offs;
};

/* Block cipher context. */
struct spacc_ablk_ctx {
	struct spacc_generic_ctx	generic;
	u8				key[AES_MAX_KEY_SIZE];
	u8				key_len;
	/*
	 * The fallback cipher. If the operation can't be done in hardware,
	 * fall back to a software version.
	 */
	struct crypto_ablkcipher	*sw_cipher;
};

/* AEAD cipher context. */
struct spacc_aead_ctx {
	struct spacc_generic_ctx	generic;
	u8				cipher_key[AES_MAX_KEY_SIZE];
	u8				hash_ctx[SPACC_CRYPTO_IPSEC_HASH_PG_SZ];
	u8				cipher_key_len;
	u8				hash_key_len;
	struct crypto_aead		*sw_cipher;
	size_t				auth_size;
	u8				salt[AES_BLOCK_SIZE];
};

static int spacc_ablk_submit(struct spacc_req *req);

static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg)
{
	return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
}

static inline int spacc_fifo_cmd_full(struct spacc_engine *engine)
{
	u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET);

	return fifo_stat & SPA_FIFO_CMD_FULL;
}

/*
 * Given a cipher context and a context number, get the base address of the
 * context page.
 *
 * Returns the address of the context page where the key/context may
 * be written.
 */
static inline void __iomem *spacc_ctx_page_addr(struct spacc_generic_ctx *ctx,
						unsigned indx,
						bool is_cipher_ctx)
{
	return is_cipher_ctx ? ctx->engine->cipher_ctx_base +
			(indx * ctx->engine->cipher_pg_sz) :
		ctx->engine->hash_key_base + (indx * ctx->engine->hash_pg_sz);
}

/* The context pages can only be written with 32-bit accesses. */
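/* Note that count is in units of 32-bit words, not bytes. */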
static inline void memcpy_toio32(u32 __iomem *dst, const void *src,
				 unsigned count)
{
	const u32 *src32 = (const u32 *) src;

	while (count--)
		writel(*src32++, dst++);
}

static void spacc_cipher_write_ctx(struct spacc_generic_ctx *ctx,
				   void __iomem *page_addr, const u8 *key,
				   size_t key_len, const u8 *iv, size_t iv_len)
{
	void __iomem *key_ptr = page_addr + ctx->key_offs;
	void __iomem *iv_ptr = page_addr + ctx->iv_offs;

	memcpy_toio32(key_ptr, key, key_len / 4);
	memcpy_toio32(iv_ptr, iv, iv_len / 4);
}

/*
 * Load a context into the engine's context memory.
 *
 * Returns the index of the context page where the context was loaded.
 */
static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx,
			       const u8 *ciph_key, size_t ciph_len,
			       const u8 *iv, size_t ivlen, const u8 *hash_key,
			       size_t hash_len)
{
	unsigned indx = ctx->engine->next_ctx++;
	void __iomem *ciph_page_addr, *hash_page_addr;

	ciph_page_addr = spacc_ctx_page_addr(ctx, indx, 1);
	hash_page_addr = spacc_ctx_page_addr(ctx, indx, 0);

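	/* Wrap the context index; this relies on fifo_sz being a power of two. */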
	ctx->engine->next_ctx &= ctx->engine->fifo_sz - 1;
	spacc_cipher_write_ctx(ctx, ciph_page_addr, ciph_key, ciph_len, iv,
			       ivlen);
	writel(ciph_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET) |
	       (1 << SPA_KEY_SZ_CIPHER_OFFSET),
	       ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);

	if (hash_key) {
		memcpy_toio32(hash_page_addr, hash_key, hash_len / 4);
		writel(hash_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET),
		       ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);
	}

	return indx;
}

/* Count the number of scatterlist entries needed to cover nbytes of data. */
static int sg_count(struct scatterlist *sg_list, int nbytes)
{
	struct scatterlist *sg = sg_list;
	int sg_nents = 0;

	while (nbytes > 0) {
		++sg_nents;
		nbytes -= sg->length;
		sg = sg_next(sg);
	}

	return sg_nents;
}

static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len)
{
	ddt->p = phys;
	ddt->len = len;
}

/*
 * Take a crypto request and scatterlists for the data and turn them into DDTs
 * for passing to the crypto engines. This also DMA maps the data so that the
 * crypto engines can DMA to/from them.
 */
static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine,
					 struct scatterlist *payload,
					 unsigned nbytes,
					 enum dma_data_direction dir,
					 dma_addr_t *ddt_phys)
{
	unsigned nents, mapped_ents;
	struct scatterlist *cur;
	struct spacc_ddt *ddt;
	int i;

	nents = sg_count(payload, nbytes);
	mapped_ents = dma_map_sg(engine->dev, payload, nents, dir);

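	/* The extra entry is for the terminating (0, 0) DDT. */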
	if (mapped_ents + 1 > MAX_DDT_LEN)
		goto out;

	ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, ddt_phys);
	if (!ddt)
		goto out;

	for_each_sg(payload, cur, mapped_ents, i)
		ddt_set(&ddt[i], sg_dma_address(cur), sg_dma_len(cur));
	ddt_set(&ddt[mapped_ents], 0, 0);

	return ddt;

out:
	dma_unmap_sg(engine->dev, payload, nents, dir);
	return NULL;
}

static int spacc_aead_make_ddts(struct spacc_req *req, u8 *giv)
{
	struct aead_request *areq = container_of(req->req, struct aead_request,
						 base);
	struct spacc_engine *engine = req->engine;
	struct spacc_ddt *src_ddt, *dst_ddt;
	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(areq));
	unsigned nents = sg_count(areq->src, areq->cryptlen);
	dma_addr_t iv_addr;
	struct scatterlist *cur;
	int i, dst_ents, src_ents, assoc_ents;
	u8 *iv = giv ? giv : areq->iv;

	src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr);
	if (!src_ddt)
		return -ENOMEM;

	dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr);
	if (!dst_ddt) {
		dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
		return -ENOMEM;
	}

	req->src_ddt = src_ddt;
	req->dst_ddt = dst_ddt;

	assoc_ents = dma_map_sg(engine->dev, areq->assoc,
		sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);
	if (areq->src != areq->dst) {
		src_ents = dma_map_sg(engine->dev, areq->src, nents,
				      DMA_TO_DEVICE);
		dst_ents = dma_map_sg(engine->dev, areq->dst, nents,
				      DMA_FROM_DEVICE);
	} else {
		src_ents = dma_map_sg(engine->dev, areq->src, nents,
				      DMA_BIDIRECTIONAL);
		dst_ents = 0;
	}

	/*
	 * Map the IV/GIV. For the GIV it needs to be bidirectional as it is
	 * formed by the crypto block and sent as the ESP IV for IPsec.
	 */
	iv_addr = dma_map_single(engine->dev, iv, ivsize,
				 giv ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	req->giv_pa = iv_addr;

	/*
	 * Add the associated data to the DDTs. For decryption we don't copy
	 * the associated data to the destination.
	 */
	for_each_sg(areq->assoc, cur, assoc_ents, i) {
		ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
		if (req->is_encrypt)
			ddt_set(dst_ddt++, sg_dma_address(cur),
				sg_dma_len(cur));
	}
	ddt_set(src_ddt++, iv_addr, ivsize);

	if (giv || req->is_encrypt)
		ddt_set(dst_ddt++, iv_addr, ivsize);

	/*
	 * Now map in the payload for the source and destination and terminate
	 * with the NULL pointers.
	 */
	for_each_sg(areq->src, cur, src_ents, i) {
		ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
		if (areq->src == areq->dst)
			ddt_set(dst_ddt++, sg_dma_address(cur),
				sg_dma_len(cur));
	}

	for_each_sg(areq->dst, cur, dst_ents, i)
		ddt_set(dst_ddt++, sg_dma_address(cur),
			sg_dma_len(cur));

	ddt_set(src_ddt, 0, 0);
	ddt_set(dst_ddt, 0, 0);

	return 0;
}

static void spacc_aead_free_ddts(struct spacc_req *req)
{
	struct aead_request *areq = container_of(req->req, struct aead_request,
						 base);
	struct spacc_alg *alg = to_spacc_alg(req->req->tfm->__crt_alg);
	struct spacc_ablk_ctx *aead_ctx = crypto_tfm_ctx(req->req->tfm);
	struct spacc_engine *engine = aead_ctx->generic.engine;
	unsigned ivsize = alg->alg.cra_aead.ivsize;
	unsigned nents = sg_count(areq->src, areq->cryptlen);

	if (areq->src != areq->dst) {
		dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE);
		dma_unmap_sg(engine->dev, areq->dst,
			     sg_count(areq->dst, areq->cryptlen),
			     DMA_FROM_DEVICE);
	} else
		dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL);

	dma_unmap_sg(engine->dev, areq->assoc,
		     sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);

	dma_unmap_single(engine->dev, req->giv_pa, ivsize, DMA_BIDIRECTIONAL);

	dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr);
	dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr);
}

static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt,
			   dma_addr_t ddt_addr, struct scatterlist *payload,
			   unsigned nbytes, enum dma_data_direction dir)
{
	unsigned nents = sg_count(payload, nbytes);

	dma_unmap_sg(req->engine->dev, payload, nents, dir);
	dma_pool_free(req->engine->req_pool, ddt, ddt_addr);
}

/*
 * Set key for a DES operation in an AEAD cipher. This also performs weak key
 * checking if required.
 */
static int spacc_aead_des_setkey(struct crypto_aead *aead, const u8 *key,
				 unsigned int len)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 tmp[DES_EXPKEY_WORDS];

	if (unlikely(!des_ekey(tmp, key)) &&
	    (crypto_aead_get_flags(aead)) & CRYPTO_TFM_REQ_WEAK_KEY) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	memcpy(ctx->cipher_key, key, len);
	ctx->cipher_key_len = len;

	return 0;
}

/* Set the key for the AES block cipher component of the AEAD transform. */
static int spacc_aead_aes_setkey(struct crypto_aead *aead, const u8 *key,
				 unsigned int len)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);

	/*
	 * IPSec engine only supports 128 and 256 bit AES keys. If we get a
	 * request for any other size (192 bits) then we need to do a software
	 * fallback.
	 */
	if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) {
		/*
		 * Set the fallback transform to use the same request flags as
		 * the hardware transform.
		 */
		ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
		ctx->sw_cipher->base.crt_flags |=
			tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
		return crypto_aead_setkey(ctx->sw_cipher, key, len);
	}

	memcpy(ctx->cipher_key, key, len);
	ctx->cipher_key_len = len;

	return 0;
}

static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
	struct rtattr *rta = (void *)key;
	struct crypto_authenc_key_param *param;
	unsigned int authkeylen, enckeylen;
	int err = -EINVAL;

	if (!RTA_OK(rta, keylen))
		goto badkey;

	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;

	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	enckeylen = be32_to_cpu(param->enckeylen);

	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < enckeylen)
		goto badkey;

	authkeylen = keylen - enckeylen;

	if (enckeylen > AES_MAX_KEY_SIZE)
		goto badkey;

	if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
	    SPA_CTRL_CIPH_ALG_AES)
		err = spacc_aead_aes_setkey(tfm, key + authkeylen, enckeylen);
	else
		err = spacc_aead_des_setkey(tfm, key + authkeylen, enckeylen);

	if (err)
		goto badkey;

	memcpy(ctx->hash_ctx, key, authkeylen);
	ctx->hash_key_len = authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int spacc_aead_setauthsize(struct crypto_aead *tfm,
				  unsigned int authsize)
{
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));

	ctx->auth_size = authsize;

	return 0;
}

/*
 * Check if an AEAD request requires a fallback operation. Some requests can't
 * be completed in hardware because the hardware may not support certain key
 * sizes. In these cases we need to complete the request in software.
 */
static int spacc_aead_need_fallback(struct spacc_req *req)
{
	struct aead_request *aead_req;
	struct crypto_tfm *tfm = req->req->tfm;
	struct crypto_alg *alg = req->req->tfm->__crt_alg;
	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);

	aead_req = container_of(req->req, struct aead_request, base);
	/*
	 * If we have an unsupported key length, then we need to do a
	 * software fallback.
	 */
	if ((spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
	    SPA_CTRL_CIPH_ALG_AES &&
	    ctx->cipher_key_len != AES_KEYSIZE_128 &&
	    ctx->cipher_key_len != AES_KEYSIZE_256)
		return 1;

	return 0;
}

static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type,
				  bool is_encrypt)
{
	struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req));
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm);
	int err;

	if (ctx->sw_cipher) {
		/*
		 * Change the request to use the software fallback transform,
		 * and once the ciphering has completed, put the old transform
		 * back into the request.
		 */
		aead_request_set_tfm(req, ctx->sw_cipher);
		err = is_encrypt ? crypto_aead_encrypt(req) :
		    crypto_aead_decrypt(req);
		aead_request_set_tfm(req, __crypto_aead_cast(old_tfm));
	} else
		err = -EINVAL;

	return err;
}

static void spacc_aead_complete(struct spacc_req *req)
{
	spacc_aead_free_ddts(req);
	req->req->complete(req->req, req->result);
}

static int spacc_aead_submit(struct spacc_req *req)
{
	struct crypto_tfm *tfm = req->req->tfm;
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = req->req->tfm->__crt_alg;
	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
	struct spacc_engine *engine = ctx->generic.engine;
	u32 ctrl, proc_len, assoc_len;
	struct aead_request *aead_req =
		container_of(req->req, struct aead_request, base);

	req->result = -EINPROGRESS;
	req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key,
		ctx->cipher_key_len, aead_req->iv, alg->cra_aead.ivsize,
		ctx->hash_ctx, ctx->hash_key_len);

	/* Set the source and destination DDT pointers. */
	writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
	writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
	writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);

	assoc_len = aead_req->assoclen;
	proc_len = aead_req->cryptlen + assoc_len;

	/*
	 * If we aren't generating an IV, then we need to include the IV in the
	 * associated data so that it is included in the hash.
	 */
	if (!req->giv) {
		assoc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
		proc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
	} else
		proc_len += req->giv_len;

	/*
	 * If we are decrypting, we need to take the length of the ICV out of
	 * the processing length.
	 */
	if (!req->is_encrypt)
		proc_len -= ctx->auth_size;

	writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET);
	writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET);
	writel(ctx->auth_size, engine->regs + SPA_ICV_LEN_REG_OFFSET);
	writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
	writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);

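	/*
	 * Build the control word: select the context and ask for the ICV to
	 * be appended. Going by the SPA_CTRL bit names, encryption also
	 * copies the AAD through to the output while decryption asks the
	 * engine to expand the key schedule.
	 */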
	ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
		(1 << SPA_CTRL_ICV_APPEND);
	if (req->is_encrypt)
		ctrl |= (1 << SPA_CTRL_ENCRYPT_IDX) | (1 << SPA_CTRL_AAD_COPY);
	else
		ctrl |= (1 << SPA_CTRL_KEY_EXP);

	mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);

	writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);

	return -EINPROGRESS;
}

static int spacc_req_submit(struct spacc_req *req);

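/* Feed pending requests into the hardware; the caller must hold engine->hw_lock. */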
static void spacc_push(struct spacc_engine *engine)
{
	struct spacc_req *req;

	while (!list_empty(&engine->pending) &&
	       engine->in_flight + 1 <= engine->fifo_sz) {

		++engine->in_flight;
		req = list_first_entry(&engine->pending, struct spacc_req,
				       list);
		list_move_tail(&req->list, &engine->in_progress);

		req->result = spacc_req_submit(req);
	}
}

/*
 * Setup an AEAD request for processing. This will configure the engine, load
 * the context and then start the packet processing.
 *
 * @giv Pointer to destination address for a generated IV. If the
 *	request does not need to generate an IV then this should be set to NULL.
 */
static int spacc_aead_setup(struct aead_request *req, u8 *giv,
			    unsigned alg_type, bool is_encrypt)
{
	struct crypto_alg *alg = req->base.tfm->__crt_alg;
	struct spacc_engine *engine = to_spacc_alg(alg)->engine;
	struct spacc_req *dev_req = aead_request_ctx(req);
	int err = -EINPROGRESS;
	unsigned long flags;
	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));

	dev_req->giv		= giv;
	dev_req->giv_len	= ivsize;
	dev_req->req		= &req->base;
	dev_req->is_encrypt	= is_encrypt;
	dev_req->result		= -EBUSY;
	dev_req->engine		= engine;
	dev_req->complete	= spacc_aead_complete;

	if (unlikely(spacc_aead_need_fallback(dev_req)))
		return spacc_aead_do_fallback(req, alg_type, is_encrypt);

	err = spacc_aead_make_ddts(dev_req, dev_req->giv);
	if (err)
		goto out;

	err = -EINPROGRESS;
	spin_lock_irqsave(&engine->hw_lock, flags);
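	/*
	 * Check if the engine will accept the operation now. If it won't then
	 * we either stick it on the end of a pending list if we can backlog,
	 * or bail out with an error if not.
	 */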
	if (unlikely(spacc_fifo_cmd_full(engine)) ||
	    engine->in_flight + 1 > engine->fifo_sz) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			err = -EBUSY;
			spin_unlock_irqrestore(&engine->hw_lock, flags);
			goto out_free_ddts;
		}
		list_add_tail(&dev_req->list, &engine->pending);
	} else {
		list_add_tail(&dev_req->list, &engine->pending);
		spacc_push(engine);
	}
	spin_unlock_irqrestore(&engine->hw_lock, flags);

	goto out;

out_free_ddts:
	spacc_aead_free_ddts(dev_req);
out:
	return err;
}

static int spacc_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);

	return spacc_aead_setup(req, NULL, alg->type, 1);
}

static int spacc_aead_givencrypt(struct aead_givcrypt_request *req)
{
	struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	size_t ivsize = crypto_aead_ivsize(tfm);
	struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
	unsigned len;
	__be64 seq;

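	/*
	 * Use the random salt as the request IV and place the big-endian
	 * sequence number in the low-order bytes of the generated IV.
	 */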
	memcpy(req->areq.iv, ctx->salt, ivsize);
	len = ivsize;
	if (ivsize > sizeof(u64)) {
		memset(req->giv, 0, ivsize - sizeof(u64));
		len = sizeof(u64);
	}
	seq = cpu_to_be64(req->seq);
	memcpy(req->giv + ivsize - len, &seq, len);

	return spacc_aead_setup(&req->areq, req->giv, alg->type, 1);
}

static int spacc_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);

	return spacc_aead_setup(req, NULL, alg->type, 0);
}

/*
 * Initialise a new AEAD context. This is responsible for allocating the
 * fallback cipher and initialising the context.
 */
static int spacc_aead_cra_init(struct crypto_tfm *tfm)
{
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
	struct spacc_engine *engine = spacc_alg->engine;

	ctx->generic.flags = spacc_alg->type;
	ctx->generic.engine = engine;
	ctx->sw_cipher = crypto_alloc_aead(alg->cra_name, 0,
					   CRYPTO_ALG_ASYNC |
					   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->sw_cipher)) {
		dev_warn(engine->dev, "failed to allocate fallback for %s\n",
			 alg->cra_name);
		ctx->sw_cipher = NULL;
	}
	ctx->generic.key_offs = spacc_alg->key_offs;
	ctx->generic.iv_offs = spacc_alg->iv_offs;

	get_random_bytes(ctx->salt, sizeof(ctx->salt));

	tfm->crt_aead.reqsize = sizeof(struct spacc_req);

	return 0;
}

/*
 * Destructor for an AEAD context. This is called when the transform is freed
 * and must free the fallback cipher.
 */
static void spacc_aead_cra_exit(struct crypto_tfm *tfm)
{
	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sw_cipher)
		crypto_free_aead(ctx->sw_cipher);
	ctx->sw_cipher = NULL;
}

/*
 * Set the DES key for a block cipher transform. This also performs weak key
 * checking if the transform has requested it.
 */
static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			    unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 tmp[DES_EXPKEY_WORDS];

	if (len > DES3_EDE_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if (unlikely(!des_ekey(tmp, key)) &&
	    (crypto_ablkcipher_get_flags(cipher) & CRYPTO_TFM_REQ_WEAK_KEY)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	memcpy(ctx->key, key, len);
	ctx->key_len = len;

	return 0;
}

/*
 * Set the key for an AES block cipher. Some key lengths are not supported in
 * hardware so this must also check whether a fallback is needed.
 */
static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			    unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
	int err = 0;

	if (len > AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	/*
	 * IPSec engine only supports 128 and 256 bit AES keys. If we get a
	 * request for any other size (192 bits) then we need to do a software
	 * fallback.
	 */
	if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 &&
	    ctx->sw_cipher) {
		/*
		 * Set the fallback transform to use the same request flags as
		 * the hardware transform.
		 */
		ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
		ctx->sw_cipher->base.crt_flags |=
			cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK;

		err = crypto_ablkcipher_setkey(ctx->sw_cipher, key, len);
		if (err)
			goto sw_setkey_failed;
	} else if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 &&
		   !ctx->sw_cipher)
		err = -EINVAL;

	memcpy(ctx->key, key, len);
	ctx->key_len = len;

sw_setkey_failed:
	if (err && ctx->sw_cipher) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |=
			ctx->sw_cipher->base.crt_flags & CRYPTO_TFM_RES_MASK;
	}

	return err;
}

static int spacc_kasumi_f8_setkey(struct crypto_ablkcipher *cipher,
				  const u8 *key, unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
	int err = 0;

	if (len > AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		err = -EINVAL;
		goto out;
	}

	memcpy(ctx->key, key, len);
	ctx->key_len = len;

out:
	return err;
}

static int spacc_ablk_need_fallback(struct spacc_req *req)
{
	struct spacc_ablk_ctx *ctx;
	struct crypto_tfm *tfm = req->req->tfm;
	struct crypto_alg *alg = req->req->tfm->__crt_alg;
	struct spacc_alg *spacc_alg = to_spacc_alg(alg);

	ctx = crypto_tfm_ctx(tfm);

	return (spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
			SPA_CTRL_CIPH_ALG_AES &&
			ctx->key_len != AES_KEYSIZE_128 &&
			ctx->key_len != AES_KEYSIZE_256;
}

static void spacc_ablk_complete(struct spacc_req *req)
{
	struct ablkcipher_request *ablk_req =
		container_of(req->req, struct ablkcipher_request, base);

	if (ablk_req->src != ablk_req->dst) {
		spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src,
			       ablk_req->nbytes, DMA_TO_DEVICE);
		spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
			       ablk_req->nbytes, DMA_FROM_DEVICE);
	} else
		spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
			       ablk_req->nbytes, DMA_BIDIRECTIONAL);

	req->req->complete(req->req, req->result);
}

static int spacc_ablk_submit(struct spacc_req *req)
{
	struct crypto_tfm *tfm = req->req->tfm;
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req);
	struct crypto_alg *alg = req->req->tfm->__crt_alg;
	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
	struct spacc_engine *engine = ctx->generic.engine;
	u32 ctrl;

	req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->key,
		ctx->key_len, ablk_req->info, alg->cra_ablkcipher.ivsize,
		NULL, 0);

	writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
	writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
	writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);

	writel(ablk_req->nbytes, engine->regs + SPA_PROC_LEN_REG_OFFSET);
	writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
	writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
	writel(0, engine->regs + SPA_AAD_LEN_REG_OFFSET);

	ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
		(req->is_encrypt ? (1 << SPA_CTRL_ENCRYPT_IDX) :
		 (1 << SPA_CTRL_KEY_EXP));

	mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);

	writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);

	return -EINPROGRESS;
}

static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
				  unsigned alg_type, bool is_encrypt)
{
	struct crypto_tfm *old_tfm =
	    crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
	int err;

	if (!ctx->sw_cipher)
		return -EINVAL;

	/*
	 * Change the request to use the software fallback transform, and once
	 * the ciphering has completed, put the old transform back into the
	 * request.
	 */
	ablkcipher_request_set_tfm(req, ctx->sw_cipher);
	err = is_encrypt ? crypto_ablkcipher_encrypt(req) :
		crypto_ablkcipher_decrypt(req);
	ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(old_tfm));

	return err;
}

static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
			    bool is_encrypt)
{
	struct crypto_alg *alg = req->base.tfm->__crt_alg;
	struct spacc_engine *engine = to_spacc_alg(alg)->engine;
	struct spacc_req *dev_req = ablkcipher_request_ctx(req);
	unsigned long flags;
	int err = -ENOMEM;

	dev_req->req		= &req->base;
	dev_req->is_encrypt	= is_encrypt;
	dev_req->engine		= engine;
	dev_req->complete	= spacc_ablk_complete;
	dev_req->result		= -EINPROGRESS;

	if (unlikely(spacc_ablk_need_fallback(dev_req)))
		return spacc_ablk_do_fallback(req, alg_type, is_encrypt);

	/*
	 * Create the DDTs for the engine. If we share the same source and
	 * destination then we can optimize by reusing the DDTs.
	 */
	if (req->src != req->dst) {
		dev_req->src_ddt = spacc_sg_to_ddt(engine, req->src,
			req->nbytes, DMA_TO_DEVICE, &dev_req->src_addr);
		if (!dev_req->src_ddt)
			goto out;

		dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
			req->nbytes, DMA_FROM_DEVICE, &dev_req->dst_addr);
		if (!dev_req->dst_ddt)
			goto out_free_src;
	} else {
		dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
			req->nbytes, DMA_BIDIRECTIONAL, &dev_req->dst_addr);
		if (!dev_req->dst_ddt)
			goto out;

		dev_req->src_ddt = NULL;
		dev_req->src_addr = dev_req->dst_addr;
	}

	err = -EINPROGRESS;
	spin_lock_irqsave(&engine->hw_lock, flags);
	/*
	 * Check if the engine will accept the operation now. If it won't then
	 * we either stick it on the end of a pending list if we can backlog,
	 * or bail out with an error if not.
	 */
	if (unlikely(spacc_fifo_cmd_full(engine)) ||
	    engine->in_flight + 1 > engine->fifo_sz) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			err = -EBUSY;
			spin_unlock_irqrestore(&engine->hw_lock, flags);
			goto out_free_ddts;
		}
		list_add_tail(&dev_req->list, &engine->pending);
	} else {
		list_add_tail(&dev_req->list, &engine->pending);
		spacc_push(engine);
	}
	spin_unlock_irqrestore(&engine->hw_lock, flags);

	goto out;

out_free_ddts:
	spacc_free_ddt(dev_req, dev_req->dst_ddt, dev_req->dst_addr, req->dst,
		       req->nbytes, req->src == req->dst ?
		       DMA_BIDIRECTIONAL : DMA_FROM_DEVICE);
out_free_src:
	if (req->src != req->dst)
		spacc_free_ddt(dev_req, dev_req->src_ddt, dev_req->src_addr,
			       req->src, req->nbytes, DMA_TO_DEVICE);
out:
	return err;
}

static int spacc_ablk_cra_init(struct crypto_tfm *tfm)
{
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
	struct spacc_engine *engine = spacc_alg->engine;

	ctx->generic.flags = spacc_alg->type;
	ctx->generic.engine = engine;
	if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
		ctx->sw_cipher = crypto_alloc_ablkcipher(alg->cra_name, 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(ctx->sw_cipher)) {
			dev_warn(engine->dev, "failed to allocate fallback for %s\n",
				 alg->cra_name);
			ctx->sw_cipher = NULL;
		}
	}
	ctx->generic.key_offs = spacc_alg->key_offs;
	ctx->generic.iv_offs = spacc_alg->iv_offs;

	tfm->crt_ablkcipher.reqsize = sizeof(struct spacc_req);

	return 0;
}

static void spacc_ablk_cra_exit(struct crypto_tfm *tfm)
{
	struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sw_cipher)
		crypto_free_ablkcipher(ctx->sw_cipher);
	ctx->sw_cipher = NULL;
}

static int spacc_ablk_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);

	return spacc_ablk_setup(req, alg->type, 1);
}

static int spacc_ablk_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);

	return spacc_ablk_setup(req, alg->type, 0);
}

static inline int spacc_fifo_stat_empty(struct spacc_engine *engine)
{
	return readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET) &
		SPA_FIFO_STAT_EMPTY;
}

static void spacc_process_done(struct spacc_engine *engine)
{
	struct spacc_req *req;
	unsigned long flags;

	spin_lock_irqsave(&engine->hw_lock, flags);

	while (!spacc_fifo_stat_empty(engine)) {
		req = list_first_entry(&engine->in_progress, struct spacc_req,
				       list);
		list_move_tail(&req->list, &engine->completed);
		--engine->in_flight;

		/* POP the status register. */
		writel(~0, engine->regs + SPA_STAT_POP_REG_OFFSET);
		req->result = (readl(engine->regs + SPA_STATUS_REG_OFFSET) &
		     SPA_STATUS_RES_CODE_MASK) >> SPA_STATUS_RES_CODE_OFFSET;

		/*
		 * Convert the SPAcc error status into the standard POSIX error
		 * codes.
		 */
		if (unlikely(req->result)) {
			switch (req->result) {
			case SPA_STATUS_ICV_FAIL:
				req->result = -EBADMSG;
				break;

			case SPA_STATUS_MEMORY_ERROR:
				dev_warn(engine->dev,
					 "memory error triggered\n");
				req->result = -EFAULT;
				break;

			case SPA_STATUS_BLOCK_ERROR:
				dev_warn(engine->dev,
					 "block error triggered\n");
				req->result = -EIO;
				break;
			}
		}
	}

	tasklet_schedule(&engine->complete);

	spin_unlock_irqrestore(&engine->hw_lock, flags);
}

static irqreturn_t spacc_spacc_irq(int irq, void *dev)
{
	struct spacc_engine *engine = (struct spacc_engine *)dev;
	u32 spacc_irq_stat = readl(engine->regs + SPA_IRQ_STAT_REG_OFFSET);

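	/* Acknowledge the pending interrupt sources by writing the status back. */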
	writel(spacc_irq_stat, engine->regs + SPA_IRQ_STAT_REG_OFFSET);
	spacc_process_done(engine);

	return IRQ_HANDLED;
}

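/*
 * Timer callback used to reap any completed packets that did not raise a
 * statistics interrupt (see the PACKET_TIMEOUT comment above).
 */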
static void spacc_packet_timeout(unsigned long data)
{
	struct spacc_engine *engine = (struct spacc_engine *)data;

	spacc_process_done(engine);
}

static int spacc_req_submit(struct spacc_req *req)
{
	struct crypto_alg *alg = req->req->tfm->__crt_alg;

	if (CRYPTO_ALG_TYPE_AEAD == (CRYPTO_ALG_TYPE_MASK & alg->cra_flags))
		return spacc_aead_submit(req);
	else
		return spacc_ablk_submit(req);
}

static void spacc_spacc_complete(unsigned long data)
{
	struct spacc_engine *engine = (struct spacc_engine *)data;
	struct spacc_req *req, *tmp;
	unsigned long flags;
	LIST_HEAD(completed);

	spin_lock_irqsave(&engine->hw_lock, flags);

	list_splice_init(&engine->completed, &completed);
	spacc_push(engine);
	if (engine->in_flight)
		mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);

	spin_unlock_irqrestore(&engine->hw_lock, flags);

	list_for_each_entry_safe(req, tmp, &completed, list) {
		list_del(&req->list);
		req->complete(req);
	}
}

#ifdef CONFIG_PM
static int spacc_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct spacc_engine *engine = platform_get_drvdata(pdev);

	/*
	 * We only support standby mode. All we have to do is gate the clock to
	 * the spacc. The hardware will preserve state until we turn it back
	 * on again.
	 */
	clk_disable(engine->clk);

	return 0;
}

static int spacc_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct spacc_engine *engine = platform_get_drvdata(pdev);

	return clk_enable(engine->clk);
}

static const struct dev_pm_ops spacc_pm_ops = {
	.suspend	= spacc_suspend,
	.resume		= spacc_resume,
};
#endif /* CONFIG_PM */

static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev)
{
	return dev ? platform_get_drvdata(to_platform_device(dev)) : NULL;
}

static ssize_t spacc_stat_irq_thresh_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct spacc_engine *engine = spacc_dev_to_engine(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", engine->stat_irq_thresh);
}

static ssize_t spacc_stat_irq_thresh_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t len)
{
	struct spacc_engine *engine = spacc_dev_to_engine(dev);
	unsigned long thresh;

	if (strict_strtoul(buf, 0, &thresh))
		return -EINVAL;

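	/* Clamp the threshold to at least one and below the FIFO size. */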
	thresh = clamp(thresh, 1UL, engine->fifo_sz - 1);

	engine->stat_irq_thresh = thresh;
	writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
	       engine->regs + SPA_IRQ_CTRL_REG_OFFSET);

	return len;
}
static DEVICE_ATTR(stat_irq_thresh, 0644, spacc_stat_irq_thresh_show,
		   spacc_stat_irq_thresh_store);

static struct spacc_alg ipsec_engine_algs[] = {
	{
		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC,
		.key_offs = 0,
		.iv_offs = AES_MAX_KEY_SIZE,
		.alg = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_ablkcipher = {
				.setkey = spacc_aes_setkey,
				.encrypt = spacc_ablk_encrypt,
				.decrypt = spacc_ablk_decrypt,
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			},
			.cra_init = spacc_ablk_cra_init,
			.cra_exit = spacc_ablk_cra_exit,
		},
	},
	{
		.key_offs = 0,
		.iv_offs = AES_MAX_KEY_SIZE,
		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_ECB,
		.alg = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "ecb-aes-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_ablkcipher = {
				.setkey = spacc_aes_setkey,
				.encrypt = spacc_ablk_encrypt,
				.decrypt = spacc_ablk_decrypt,
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
			},
			.cra_init = spacc_ablk_cra_init,
			.cra_exit = spacc_ablk_cra_exit,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
		.alg = {
			.cra_name = "cbc(des)",
			.cra_driver_name = "cbc-des-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_ablkcipher = {
				.setkey = spacc_des_setkey,
				.encrypt = spacc_ablk_encrypt,
				.decrypt = spacc_ablk_decrypt,
				.min_keysize = DES_KEY_SIZE,
				.max_keysize = DES_KEY_SIZE,
				.ivsize = DES_BLOCK_SIZE,
			},
			.cra_init = spacc_ablk_cra_init,
			.cra_exit = spacc_ablk_cra_exit,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
		.alg = {
			.cra_name = "ecb(des)",
			.cra_driver_name = "ecb-des-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_ablkcipher = {
				.setkey = spacc_des_setkey,
				.encrypt = spacc_ablk_encrypt,
				.decrypt = spacc_ablk_decrypt,
				.min_keysize = DES_KEY_SIZE,
				.max_keysize = DES_KEY_SIZE,
			},
			.cra_init = spacc_ablk_cra_init,
			.cra_exit = spacc_ablk_cra_exit,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
		.alg = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-des3-ede-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_ablkcipher = {
				.setkey = spacc_des_setkey,
				.encrypt = spacc_ablk_encrypt,
				.decrypt = spacc_ablk_decrypt,
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
 | 1437 | 			}, | 
 | 1438 | 			.cra_init = spacc_ablk_cra_init, | 
 | 1439 | 			.cra_exit = spacc_ablk_cra_exit, | 
 | 1440 | 		}, | 
 | 1441 | 	}, | 
 | 1442 | 	{ | 
 | 1443 | 		.key_offs = DES_BLOCK_SIZE, | 
 | 1444 | 		.iv_offs = 0, | 
 | 1445 | 		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB, | 
 | 1446 | 		.alg = { | 
 | 1447 | 			.cra_name = "ecb(des3_ede)", | 
 | 1448 | 			.cra_driver_name = "ecb-des3-ede-picoxcell", | 
 | 1449 | 			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | 
 | 1450 | 			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, | 
 | 1451 | 			.cra_blocksize = DES3_EDE_BLOCK_SIZE, | 
 | 1452 | 			.cra_ctxsize = sizeof(struct spacc_ablk_ctx), | 
 | 1453 | 			.cra_type = &crypto_ablkcipher_type, | 
 | 1454 | 			.cra_module = THIS_MODULE, | 
 | 1455 | 			.cra_ablkcipher = { | 
 | 1456 | 				.setkey = spacc_des_setkey, | 
 | 1457 | 				.encrypt = spacc_ablk_encrypt, | 
 | 1458 | 				.decrypt = spacc_ablk_decrypt, | 
 | 1459 | 				.min_keysize = DES3_EDE_KEY_SIZE, | 
 | 1460 | 				.max_keysize = DES3_EDE_KEY_SIZE, | 
 | 1461 | 			}, | 
 | 1462 | 			.cra_init = spacc_ablk_cra_init, | 
 | 1463 | 			.cra_exit = spacc_ablk_cra_exit, | 
 | 1464 | 		}, | 
 | 1465 | 	}, | 
 | 1466 | 	{ | 
 | 1467 | 		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC | | 
 | 1468 | 				SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC, | 
 | 1469 | 		.key_offs = 0, | 
 | 1470 | 		.iv_offs = AES_MAX_KEY_SIZE, | 
 | 1471 | 		.alg = { | 
 | 1472 | 			.cra_name = "authenc(hmac(sha1),cbc(aes))", | 
 | 1473 | 			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-picoxcell", | 
 | 1474 | 			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | 
 | 1475 | 			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 
 | 1476 | 			.cra_blocksize = AES_BLOCK_SIZE, | 
 | 1477 | 			.cra_ctxsize = sizeof(struct spacc_aead_ctx), | 
 | 1478 | 			.cra_type = &crypto_aead_type, | 
 | 1479 | 			.cra_module = THIS_MODULE, | 
 | 1480 | 			.cra_aead = { | 
 | 1481 | 				.setkey = spacc_aead_setkey, | 
 | 1482 | 				.setauthsize = spacc_aead_setauthsize, | 
 | 1483 | 				.encrypt = spacc_aead_encrypt, | 
 | 1484 | 				.decrypt = spacc_aead_decrypt, | 
 | 1485 | 				.givencrypt = spacc_aead_givencrypt, | 
 | 1486 | 				.ivsize = AES_BLOCK_SIZE, | 
 | 1487 | 				.maxauthsize = SHA1_DIGEST_SIZE, | 
 | 1488 | 			}, | 
 | 1489 | 			.cra_init = spacc_aead_cra_init, | 
 | 1490 | 			.cra_exit = spacc_aead_cra_exit, | 
 | 1491 | 		}, | 
 | 1492 | 	}, | 
 | 1493 | 	{ | 
 | 1494 | 		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC | | 
 | 1495 | 				SPA_CTRL_HASH_ALG_SHA256 | | 
 | 1496 | 				SPA_CTRL_HASH_MODE_HMAC, | 
 | 1497 | 		.key_offs = 0, | 
 | 1498 | 		.iv_offs = AES_MAX_KEY_SIZE, | 
 | 1499 | 		.alg = { | 
 | 1500 | 			.cra_name = "authenc(hmac(sha256),cbc(aes))", | 
 | 1501 | 			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-picoxcell", | 
 | 1502 | 			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | 
 | 1503 | 			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 
 | 1504 | 			.cra_blocksize = AES_BLOCK_SIZE, | 
 | 1505 | 			.cra_ctxsize = sizeof(struct spacc_aead_ctx), | 
 | 1506 | 			.cra_type = &crypto_aead_type, | 
 | 1507 | 			.cra_module = THIS_MODULE, | 
 | 1508 | 			.cra_aead = { | 
 | 1509 | 				.setkey = spacc_aead_setkey, | 
 | 1510 | 				.setauthsize = spacc_aead_setauthsize, | 
 | 1511 | 				.encrypt = spacc_aead_encrypt, | 
 | 1512 | 				.decrypt = spacc_aead_decrypt, | 
 | 1513 | 				.givencrypt = spacc_aead_givencrypt, | 
 | 1514 | 				.ivsize = AES_BLOCK_SIZE, | 
 | 1515 | 				.maxauthsize = SHA256_DIGEST_SIZE, | 
 | 1516 | 			}, | 
 | 1517 | 			.cra_init = spacc_aead_cra_init, | 
 | 1518 | 			.cra_exit = spacc_aead_cra_exit, | 
 | 1519 | 		}, | 
 | 1520 | 	}, | 
 | 1521 | 	{ | 
 | 1522 | 		.key_offs = 0, | 
 | 1523 | 		.iv_offs = AES_MAX_KEY_SIZE, | 
 | 1524 | 		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC | | 
 | 1525 | 				SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC, | 
 | 1526 | 		.alg = { | 
 | 1527 | 			.cra_name = "authenc(hmac(md5),cbc(aes))", | 
 | 1528 | 			.cra_driver_name = "authenc-hmac-md5-cbc-aes-picoxcell", | 
 | 1529 | 			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | 
 | 1530 | 			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 
 | 1531 | 			.cra_blocksize = AES_BLOCK_SIZE, | 
 | 1532 | 			.cra_ctxsize = sizeof(struct spacc_aead_ctx), | 
 | 1533 | 			.cra_type = &crypto_aead_type, | 
 | 1534 | 			.cra_module = THIS_MODULE, | 
 | 1535 | 			.cra_aead = { | 
 | 1536 | 				.setkey = spacc_aead_setkey, | 
 | 1537 | 				.setauthsize = spacc_aead_setauthsize, | 
 | 1538 | 				.encrypt = spacc_aead_encrypt, | 
 | 1539 | 				.decrypt = spacc_aead_decrypt, | 
 | 1540 | 				.givencrypt = spacc_aead_givencrypt, | 
 | 1541 | 				.ivsize = AES_BLOCK_SIZE, | 
 | 1542 | 				.maxauthsize = MD5_DIGEST_SIZE, | 
 | 1543 | 			}, | 
 | 1544 | 			.cra_init = spacc_aead_cra_init, | 
 | 1545 | 			.cra_exit = spacc_aead_cra_exit, | 
 | 1546 | 		}, | 
 | 1547 | 	}, | 
 | 1548 | 	{ | 
 | 1549 | 		.key_offs = DES_BLOCK_SIZE, | 
 | 1550 | 		.iv_offs = 0, | 
 | 1551 | 		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC | | 
 | 1552 | 				SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC, | 
 | 1553 | 		.alg = { | 
 | 1554 | 			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))", | 
 | 1555 | 			.cra_driver_name = "authenc-hmac-sha1-cbc-3des-picoxcell", | 
 | 1556 | 			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | 
 | 1557 | 			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 
 | 1558 | 			.cra_blocksize = DES3_EDE_BLOCK_SIZE, | 
 | 1559 | 			.cra_ctxsize = sizeof(struct spacc_aead_ctx), | 
 | 1560 | 			.cra_type = &crypto_aead_type, | 
 | 1561 | 			.cra_module = THIS_MODULE, | 
 | 1562 | 			.cra_aead = { | 
 | 1563 | 				.setkey = spacc_aead_setkey, | 
 | 1564 | 				.setauthsize = spacc_aead_setauthsize, | 
 | 1565 | 				.encrypt = spacc_aead_encrypt, | 
 | 1566 | 				.decrypt = spacc_aead_decrypt, | 
 | 1567 | 				.givencrypt = spacc_aead_givencrypt, | 
 | 1568 | 				.ivsize = DES3_EDE_BLOCK_SIZE, | 
 | 1569 | 				.maxauthsize = SHA1_DIGEST_SIZE, | 
 | 1570 | 			}, | 
 | 1571 | 			.cra_init = spacc_aead_cra_init, | 
 | 1572 | 			.cra_exit = spacc_aead_cra_exit, | 
 | 1573 | 		}, | 
 | 1574 | 	}, | 
 | 1575 | 	{ | 
 | 1576 | 		.key_offs = DES_BLOCK_SIZE, | 
 | 1577 | 		.iv_offs = 0, | 
 | 1578 | 		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC | | 
 | 1579 | 				SPA_CTRL_HASH_ALG_SHA256 | | 
 | 1580 | 				SPA_CTRL_HASH_MODE_HMAC, | 
 | 1581 | 		.alg = { | 
 | 1582 | 			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))", | 
 | 1583 | 			.cra_driver_name = "authenc-hmac-sha256-cbc-3des-picoxcell", | 
 | 1584 | 			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | 
 | 1585 | 			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 
 | 1586 | 			.cra_blocksize = DES3_EDE_BLOCK_SIZE, | 
 | 1587 | 			.cra_ctxsize = sizeof(struct spacc_aead_ctx), | 
 | 1588 | 			.cra_type = &crypto_aead_type, | 
 | 1589 | 			.cra_module = THIS_MODULE, | 
 | 1590 | 			.cra_aead = { | 
 | 1591 | 				.setkey = spacc_aead_setkey, | 
 | 1592 | 				.setauthsize = spacc_aead_setauthsize, | 
 | 1593 | 				.encrypt = spacc_aead_encrypt, | 
 | 1594 | 				.decrypt = spacc_aead_decrypt, | 
 | 1595 | 				.givencrypt = spacc_aead_givencrypt, | 
 | 1596 | 				.ivsize = DES3_EDE_BLOCK_SIZE, | 
 | 1597 | 				.maxauthsize = SHA256_DIGEST_SIZE, | 
 | 1598 | 			}, | 
 | 1599 | 			.cra_init = spacc_aead_cra_init, | 
 | 1600 | 			.cra_exit = spacc_aead_cra_exit, | 
 | 1601 | 		}, | 
 | 1602 | 	}, | 
 | 1603 | 	{ | 
 | 1604 | 		.key_offs = DES_BLOCK_SIZE, | 
 | 1605 | 		.iv_offs = 0, | 
 | 1606 | 		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC | | 
 | 1607 | 				SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC, | 
 | 1608 | 		.alg = { | 
 | 1609 | 			.cra_name = "authenc(hmac(md5),cbc(des3_ede))", | 
 | 1610 | 			.cra_driver_name = "authenc-hmac-md5-cbc-3des-picoxcell", | 
 | 1611 | 			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | 
 | 1612 | 			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, | 
 | 1613 | 			.cra_blocksize = DES3_EDE_BLOCK_SIZE, | 
 | 1614 | 			.cra_ctxsize = sizeof(struct spacc_aead_ctx), | 
 | 1615 | 			.cra_type = &crypto_aead_type, | 
 | 1616 | 			.cra_module = THIS_MODULE, | 
 | 1617 | 			.cra_aead = { | 
 | 1618 | 				.setkey = spacc_aead_setkey, | 
 | 1619 | 				.setauthsize = spacc_aead_setauthsize, | 
 | 1620 | 				.encrypt = spacc_aead_encrypt, | 
 | 1621 | 				.decrypt = spacc_aead_decrypt, | 
 | 1622 | 				.givencrypt = spacc_aead_givencrypt, | 
 | 1623 | 				.ivsize = DES3_EDE_BLOCK_SIZE, | 
 | 1624 | 				.maxauthsize = MD5_DIGEST_SIZE, | 
 | 1625 | 			}, | 
 | 1626 | 			.cra_init = spacc_aead_cra_init, | 
 | 1627 | 			.cra_exit = spacc_aead_cra_exit, | 
 | 1628 | 		}, | 
 | 1629 | 	}, | 
 | 1630 | }; | 
 | 1631 |  | 
 | 1632 | static struct spacc_alg l2_engine_algs[] = { | 
 | 1633 | 	{ | 
 | 1634 | 		.key_offs = 0, | 
 | 1635 | 		.iv_offs = SPACC_CRYPTO_KASUMI_F8_KEY_LEN, | 
 | 1636 | 		.ctrl_default = SPA_CTRL_CIPH_ALG_KASUMI | | 
 | 1637 | 				SPA_CTRL_CIPH_MODE_F8, | 
 | 1638 | 		.alg = { | 
 | 1639 | 			.cra_name = "f8(kasumi)", | 
 | 1640 | 			.cra_driver_name = "f8-kasumi-picoxcell", | 
 | 1641 | 			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY, | 
 | 1642 | 			.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_ASYNC, | 
 | 1643 | 			.cra_blocksize = 8, | 
 | 1644 | 			.cra_ctxsize = sizeof(struct spacc_ablk_ctx), | 
 | 1645 | 			.cra_type = &crypto_ablkcipher_type, | 
 | 1646 | 			.cra_module = THIS_MODULE, | 
 | 1647 | 			.cra_ablkcipher = { | 
 | 1648 | 				.setkey = spacc_kasumi_f8_setkey, | 
 | 1649 | 				.encrypt = spacc_ablk_encrypt, | 
 | 1650 | 				.decrypt = spacc_ablk_decrypt, | 
 | 1651 | 				.min_keysize = SPACC_CRYPTO_KASUMI_F8_KEY_LEN, | 
 | 1652 | 				.max_keysize = SPACC_CRYPTO_KASUMI_F8_KEY_LEN, | 
 | 1653 | 				.ivsize = 8, | 
 | 1654 | 			}, | 
 | 1655 | 			.cra_init = spacc_ablk_cra_init, | 
 | 1656 | 			.cra_exit = spacc_ablk_cra_exit, | 
 | 1657 | 		}, | 
 | 1658 | 	}, | 
 | 1659 | }; | 
 | 1660 |  | 
| Jamie Iles | 30343ef | 2011-08-01 17:25:19 +0100 | [diff] [blame] | 1661 | #ifdef CONFIG_OF | 
 | 1662 | static const struct of_device_id spacc_of_id_table[] = { | 
 | 1663 | 	{ .compatible = "picochip,spacc-ipsec" }, | 
 | 1664 | 	{ .compatible = "picochip,spacc-l2" }, | 
 | 1665 | 	{} | 
 | 1666 | }; | 
 | 1667 | #else /* CONFIG_OF */ | 
 | 1668 | #define spacc_of_id_table NULL | 
 | 1669 | #endif /* CONFIG_OF */ | 
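 |  |  | 
 |  | /* | 
 |  |  * A device tree node matching the table above might look like the | 
 |  |  * following sketch. The unit address, register window and interrupt | 
 |  |  * number are illustrative assumptions, not taken from a real board: | 
 |  |  * | 
 |  |  *	spacc@10300000 { | 
 |  |  *		compatible = "picochip,spacc-ipsec"; | 
 |  |  *		reg = <0x10300000 0x10000>; | 
 |  |  *		interrupts = <24>; | 
 |  |  *	}; | 
 |  |  */ | 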
 | 1670 |  | 
 | 1671 | static bool spacc_is_compatible(struct platform_device *pdev, | 
 | 1672 | 				const char *spacc_type) | 
 | 1673 | { | 
 | 1674 | 	const struct platform_device_id *platid = platform_get_device_id(pdev); | 
 | 1675 |  | 
 | 1676 | 	if (platid && !strcmp(platid->name, spacc_type)) | 
 | 1677 | 		return true; | 
 | 1678 |  | 
 | 1679 | #ifdef CONFIG_OF | 
 | 1680 | 	if (of_device_is_compatible(pdev->dev.of_node, spacc_type)) | 
 | 1681 | 		return true; | 
 | 1682 | #endif /* CONFIG_OF */ | 
 | 1683 |  | 
 | 1684 | 	return false; | 
 | 1685 | } | 
 | 1686 |  | 
| Jamie Iles | c3f4200 | 2011-08-01 17:25:17 +0100 | [diff] [blame] | 1687 | static int __devinit spacc_probe(struct platform_device *pdev) | 
| Jamie Iles | ce92136 | 2011-02-21 16:43:21 +1100 | [diff] [blame] | 1688 | { | 
 | 1689 | 	int i, err, ret = -EINVAL; | 
 | 1690 | 	struct resource *mem, *irq; | 
 | 1691 | 	struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine), | 
 | 1692 | 						   GFP_KERNEL); | 
 | 1693 | 	if (!engine) | 
 | 1694 | 		return -ENOMEM; | 
 | 1695 |  | 
| Jamie Iles | 30343ef | 2011-08-01 17:25:19 +0100 | [diff] [blame] | 1696 | 	if (spacc_is_compatible(pdev, "picochip,spacc-ipsec")) { | 
| Jamie Iles | c3f4200 | 2011-08-01 17:25:17 +0100 | [diff] [blame] | 1697 | 		engine->max_ctxs	= SPACC_CRYPTO_IPSEC_MAX_CTXS; | 
 | 1698 | 		engine->cipher_pg_sz	= SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ; | 
 | 1699 | 		engine->hash_pg_sz	= SPACC_CRYPTO_IPSEC_HASH_PG_SZ; | 
 | 1700 | 		engine->fifo_sz		= SPACC_CRYPTO_IPSEC_FIFO_SZ; | 
 | 1701 | 		engine->algs		= ipsec_engine_algs; | 
 | 1702 | 		engine->num_algs	= ARRAY_SIZE(ipsec_engine_algs); | 
| Jamie Iles | 30343ef | 2011-08-01 17:25:19 +0100 | [diff] [blame] | 1703 | 	} else if (spacc_is_compatible(pdev, "picochip,spacc-l2")) { | 
| Jamie Iles | c3f4200 | 2011-08-01 17:25:17 +0100 | [diff] [blame] | 1704 | 		engine->max_ctxs	= SPACC_CRYPTO_L2_MAX_CTXS; | 
 | 1705 | 		engine->cipher_pg_sz	= SPACC_CRYPTO_L2_CIPHER_PG_SZ; | 
 | 1706 | 		engine->hash_pg_sz	= SPACC_CRYPTO_L2_HASH_PG_SZ; | 
 | 1707 | 		engine->fifo_sz		= SPACC_CRYPTO_L2_FIFO_SZ; | 
 | 1708 | 		engine->algs		= l2_engine_algs; | 
 | 1709 | 		engine->num_algs	= ARRAY_SIZE(l2_engine_algs); | 
 | 1710 | 	} else { | 
 | 1711 | 		return -EINVAL; | 
 | 1712 | 	} | 
 | 1713 |  | 
 | 1714 | 	engine->name = dev_name(&pdev->dev); | 
| Jamie Iles | ce92136 | 2011-02-21 16:43:21 +1100 | [diff] [blame] | 1715 |  | 
 | 1716 | 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 
 | 1717 | 	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 
 | 1718 | 	if (!mem || !irq) { | 
 | 1719 | 		dev_err(&pdev->dev, "no memory/irq resource for engine\n"); | 
 | 1720 | 		return -ENXIO; | 
 | 1721 | 	} | 
 | 1722 |  | 
 | 1723 | 	if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem), | 
 | 1724 | 				     engine->name)) | 
 | 1725 | 		return -ENOMEM; | 
 | 1726 |  | 
 | 1727 | 	engine->regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); | 
 | 1728 | 	if (!engine->regs) { | 
 | 1729 | 		dev_err(&pdev->dev, "memory map failed\n"); | 
 | 1730 | 		return -ENOMEM; | 
 | 1731 | 	} | 
 | 1732 |  | 
 | 1733 | 	if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0, | 
 | 1734 | 			     engine->name, engine)) { | 
 | 1735 | 		dev_err(&pdev->dev, "failed to request IRQ\n"); | 
 | 1736 | 		return -EBUSY; | 
 | 1737 | 	} | 
 | 1738 |  | 
 | 1739 | 	engine->dev		= &pdev->dev; | 
 | 1740 | 	engine->cipher_ctx_base = engine->regs + SPA_CIPH_KEY_BASE_REG_OFFSET; | 
 | 1741 | 	engine->hash_key_base	= engine->regs + SPA_HASH_KEY_BASE_REG_OFFSET; | 
 | 1742 |  | 
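 |  | 	/* | 
 |  | 	 * Create a pool of DMA descriptor tables (DDTs): each allocation | 
 |  | 	 * holds one table of up to MAX_DDT_LEN entries, 8-byte aligned and | 
 |  | 	 * kept within a 64KiB boundary (presumably a constraint of the | 
 |  | 	 * engine's DMA master). | 
 |  | 	 */ | 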
 | 1743 | 	engine->req_pool = dmam_pool_create(engine->name, engine->dev, | 
 | 1744 | 		MAX_DDT_LEN * sizeof(struct spacc_ddt), 8, SZ_64K); | 
 | 1745 | 	if (!engine->req_pool) | 
 | 1746 | 		return -ENOMEM; | 
 | 1747 |  | 
 | 1748 | 	spin_lock_init(&engine->hw_lock); | 
 | 1749 |  | 
| Jamie Iles | 4efae8c | 2011-08-01 17:25:18 +0100 | [diff] [blame] | 1750 | 	engine->clk = clk_get(&pdev->dev, "ref"); | 
| Jamie Iles | ce92136 | 2011-02-21 16:43:21 +1100 | [diff] [blame] | 1751 | 	if (IS_ERR(engine->clk)) { | 
 | 1752 | 		dev_info(&pdev->dev, "clk unavailable\n"); | 
 | 1754 | 		return PTR_ERR(engine->clk); | 
 | 1755 | 	} | 
 | 1756 |  | 
 | 1757 | 	if (clk_enable(engine->clk)) { | 
 | 1758 | 		dev_info(&pdev->dev, "unable to enable clk\n"); | 
 | 1759 | 		clk_put(engine->clk); | 
 | 1760 | 		return -EIO; | 
 | 1761 | 	} | 
 | 1762 |  | 
 | 1763 | 	err = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh); | 
 | 1764 | 	if (err) { | 
 | 1765 | 		clk_disable(engine->clk); | 
 | 1766 | 		clk_put(engine->clk); | 
 | 1767 | 		return err; | 
 | 1768 | 	} | 
 | 1769 |  | 
 | 1771 | 	/* | 
 | 1772 | 	 * Use an IRQ threshold of 50% as a default. This seems to be a | 
 | 1773 | 	 * reasonable trade-off between latency and throughput, and can be | 
 | 1774 | 	 * changed at runtime through the stat_irq_thresh sysfs attribute. | 
 | 1775 | 	 */ | 
 | 1776 | 	engine->stat_irq_thresh = (engine->fifo_sz / 2); | 
 | 1777 |  | 
 | 1778 | 	/* | 
 | 1779 | 	 * Configure the interrupts. We only use the STAT_CNT interrupt as we | 
 | 1780 | 	 * only submit a new packet for processing when we complete another in | 
 | 1781 | 	 * the queue. This minimizes time spent in the interrupt handler. | 
 | 1782 | 	 */ | 
 | 1783 | 	writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET, | 
 | 1784 | 	       engine->regs + SPA_IRQ_CTRL_REG_OFFSET); | 
 | 1785 | 	writel(SPA_IRQ_EN_STAT_EN | SPA_IRQ_EN_GLBL_EN, | 
 | 1786 | 	       engine->regs + SPA_IRQ_EN_REG_OFFSET); | 
 | 1787 |  | 
 | 1788 | 	setup_timer(&engine->packet_timeout, spacc_packet_timeout, | 
 | 1789 | 		    (unsigned long)engine); | 
 | 1790 |  | 
 | 1791 | 	INIT_LIST_HEAD(&engine->pending); | 
 | 1792 | 	INIT_LIST_HEAD(&engine->completed); | 
 | 1793 | 	INIT_LIST_HEAD(&engine->in_progress); | 
 | 1794 | 	engine->in_flight = 0; | 
 | 1795 | 	tasklet_init(&engine->complete, spacc_spacc_complete, | 
 | 1796 | 		     (unsigned long)engine); | 
 | 1797 |  | 
 | 1798 | 	platform_set_drvdata(pdev, engine); | 
 | 1799 |  | 
 | 1800 | 	INIT_LIST_HEAD(&engine->registered_algs); | 
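 |  | 	/* | 
 |  | 	 * Register every algorithm this engine variant supports. Probing | 
 |  | 	 * succeeds if at least one registration succeeds (ret becomes 0); | 
 |  | 	 * ret stays -EINVAL only if all of them fail. | 
 |  | 	 */ | 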
 | 1801 | 	for (i = 0; i < engine->num_algs; ++i) { | 
 | 1802 | 		engine->algs[i].engine = engine; | 
 | 1803 | 		err = crypto_register_alg(&engine->algs[i].alg); | 
 | 1804 | 		if (err) { | 
 | 1805 | 			dev_err(engine->dev, "failed to register alg \"%s\"\n", | 
 | 1806 | 				engine->algs[i].alg.cra_name); | 
 | 1807 | 			continue; | 
 | 1808 | 		} | 
 | 1809 |  | 
 | 1810 | 		list_add_tail(&engine->algs[i].entry, | 
 | 1811 | 			      &engine->registered_algs); | 
 | 1812 | 		ret = 0; | 
 | 1813 | 		dev_dbg(engine->dev, "registered alg \"%s\"\n", | 
 | 1814 | 			engine->algs[i].alg.cra_name); | 
 | 1815 | 	} | 
 | 1816 |  | 
 | 1817 | 	return ret; | 
 | 1818 | } | 
 | 1819 |  | 
 | 1820 | static int __devexit spacc_remove(struct platform_device *pdev) | 
 | 1821 | { | 
 | 1822 | 	struct spacc_alg *alg, *next; | 
 | 1823 | 	struct spacc_engine *engine = platform_get_drvdata(pdev); | 
 | 1824 |  | 
 | 1825 | 	del_timer_sync(&engine->packet_timeout); | 
 | 1826 | 	device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh); | 
 | 1827 |  | 
 | 1828 | 	list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) { | 
 | 1829 | 		list_del(&alg->entry); | 
 | 1830 | 		crypto_unregister_alg(&alg->alg); | 
 | 1831 | 	} | 
 | 1832 |  | 
 | 1833 | 	clk_disable(engine->clk); | 
 | 1834 | 	clk_put(engine->clk); | 
 | 1835 |  | 
 | 1836 | 	return 0; | 
 | 1837 | } | 
 | 1838 |  | 
| Jamie Iles | c3f4200 | 2011-08-01 17:25:17 +0100 | [diff] [blame] | 1839 | static const struct platform_device_id spacc_id_table[] = { | 
 | 1840 | 	{ "picochip,spacc-ipsec", }, | 
 | 1841 | 	{ "picochip,spacc-l2", }, | 
 |  | 	{ } | 
| Jamie Iles | ce92136 | 2011-02-21 16:43:21 +1100 | [diff] [blame] | 1842 | }; | 
 | 1843 |  | 
| Jamie Iles | c3f4200 | 2011-08-01 17:25:17 +0100 | [diff] [blame] | 1844 | static struct platform_driver spacc_driver = { | 
 | 1845 | 	.probe		= spacc_probe, | 
| Jamie Iles | ce92136 | 2011-02-21 16:43:21 +1100 | [diff] [blame] | 1846 | 	.remove		= __devexit_p(spacc_remove), | 
 | 1847 | 	.driver		= { | 
| Jamie Iles | c3f4200 | 2011-08-01 17:25:17 +0100 | [diff] [blame] | 1848 | 		.name	= "picochip,spacc", | 
| Jamie Iles | ce92136 | 2011-02-21 16:43:21 +1100 | [diff] [blame] | 1849 | #ifdef CONFIG_PM | 
 | 1850 | 		.pm	= &spacc_pm_ops, | 
 | 1851 | #endif /* CONFIG_PM */ | 
| Jamie Iles | 30343ef | 2011-08-01 17:25:19 +0100 | [diff] [blame] | 1852 | 		.of_match_table	= spacc_of_id_table, | 
| Jamie Iles | ce92136 | 2011-02-21 16:43:21 +1100 | [diff] [blame] | 1853 | 	}, | 
| Jamie Iles | c3f4200 | 2011-08-01 17:25:17 +0100 | [diff] [blame] | 1854 | 	.id_table	= spacc_id_table, | 
| Jamie Iles | ce92136 | 2011-02-21 16:43:21 +1100 | [diff] [blame] | 1855 | }; | 
 | 1856 |  | 
 | 1857 | static int __init spacc_init(void) | 
 | 1858 | { | 
| Jamie Iles | c3f4200 | 2011-08-01 17:25:17 +0100 | [diff] [blame] | 1859 | 	return platform_driver_register(&spacc_driver); | 
| Jamie Iles | ce92136 | 2011-02-21 16:43:21 +1100 | [diff] [blame] | 1860 | } | 
 | 1861 | module_init(spacc_init); | 
 | 1862 |  | 
 | 1863 | static void __exit spacc_exit(void) | 
 | 1864 | { | 
| Jamie Iles | c3f4200 | 2011-08-01 17:25:17 +0100 | [diff] [blame] | 1865 | 	platform_driver_unregister(&spacc_driver); | 
| Jamie Iles | ce92136 | 2011-02-21 16:43:21 +1100 | [diff] [blame] | 1866 | } | 
 | 1867 | module_exit(spacc_exit); | 
 | 1868 |  | 
 | 1869 | MODULE_LICENSE("GPL"); | 
 | 1870 | MODULE_AUTHOR("Jamie Iles"); |