/*
 * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/percpu.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	atomic_t pending;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct dm_target *target;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t pending;
	int error;
	sector_t sector;
	struct dm_crypt_io *base_io;
};

struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in;
	struct scatterlist sg_out;
	sector_t iv_sector;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};

struct iv_essiv_private {
	struct crypto_hash *hash_tfm;
	u8 *salt;
};

struct iv_benbi_private {
	int shift;
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };

/*
 * Duplicated per-CPU state for cipher.
 */
struct crypt_cpu {
	struct ablkcipher_request *req;
	/* ESSIV: struct crypto_cipher *essiv_tfm */
	void *iv_private;
	struct crypto_ablkcipher *tfms[0];
};
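
/*
 * Note (added for clarity): tfms[0] is a flexible array member;
 * crypt_ctr_cipher() sizes each per-CPU copy for tfms_count
 * transforms when it calls __alloc_percpu().
 */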

/*
 * The fields in here must be read only after initialization;
 * mutable per-CPU state belongs in struct crypt_cpu.
 */
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests and
	 * encryption requests/buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	char *cipher;
	char *cipher_string;

	struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	/*
	 * Duplicated per cpu state. Access through
	 * per_cpu_ptr() only.
	 */
	struct crypt_cpu __percpu *cpu;
	unsigned tfms_count;

	/*
	 * Layout of each crypto request:
	 *
	 *    struct ablkcipher_request
	 *       context
	 *       padding
	 *    struct dm_crypt_request
	 *       padding
	 *    IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;

	unsigned long flags;
	unsigned int key_size;
	unsigned int key_parts;
	u8 key[0];
};
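
/*
 * Note (added for clarity): key[0] is a flexible array member;
 * crypt_ctr() allocates sizeof(struct crypt_config) + key_size bytes,
 * so the decoded key is stored inline at the end of the structure.
 */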

#define MIN_IOS        16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);

static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
{
	return this_cpu_ptr(cc->cpu);
}

/*
 * Use this to access cipher attributes that are the same for each CPU.
 */
static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
{
	return __this_cpu_ptr(cc->cpu)->tfms[0];
}

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow-block modes)
 *
 * null: the initial vector is always zero. Provides compatibility with
 *       obsolete loop_fish2 devices. Do not use for new devices.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */
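
/*
 * Worked example (added for illustration): for sector number 5 and a
 * 16-byte IV, "plain" yields 05 00 00 00 followed by twelve zero bytes
 * (32-bit little-endian sector, zero padded), while "plain64" stores
 * the full 64-bit little-endian sector number in the first 8 bytes.
 */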

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);

	return 0;
}

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}

/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	struct hash_desc desc;
	struct scatterlist sg;
	struct crypto_cipher *essiv_tfm;
	int err, cpu;

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = essiv->hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
	if (err)
		return err;

	for_each_possible_cpu(cpu) {
		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;

		err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
					   crypto_hash_digestsize(essiv->hash_tfm));
		if (err)
			return err;
	}

	return 0;
}
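
/*
 * ESSIV in brief (summary added for clarity): the salt is the hash of
 * the volume key, each per-CPU ESSIV cipher is keyed with that salt,
 * and crypt_iv_essiv_gen() below computes IV = E_salt(sector number).
 */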

/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
	struct crypto_cipher *essiv_tfm;
	int cpu, r, err = 0;

	memset(essiv->salt, 0, salt_size);

	for_each_possible_cpu(cpu) {
		essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;
		r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
		if (r)
			err = r;
	}

	return err;
}

/* Set up per cpu cipher state */
static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
					     struct dm_target *ti,
					     u8 *salt, unsigned saltsize)
{
	struct crypto_cipher *essiv_tfm;
	int err;

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		return essiv_tfm;
	}

	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(-EINVAL);
	}

	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
	if (err) {
		ti->error = "Failed to set key for ESSIV cipher";
		crypto_free_cipher(essiv_tfm);
		return ERR_PTR(err);
	}

	return essiv_tfm;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	int cpu;
	struct crypt_cpu *cpu_cc;
	struct crypto_cipher *essiv_tfm;
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_hash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	kzfree(essiv->salt);
	essiv->salt = NULL;

	for_each_possible_cpu(cpu) {
		cpu_cc = per_cpu_ptr(cc->cpu, cpu);
		essiv_tfm = cpu_cc->iv_private;

		if (essiv_tfm)
			crypto_free_cipher(essiv_tfm);

		cpu_cc->iv_private = NULL;
	}
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err, cpu;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Allocate hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	for_each_possible_cpu(cpu) {
		essiv_tfm = setup_essiv_cpu(cc, ti, salt,
					    crypto_hash_digestsize(hash_tfm));
		if (IS_ERR(essiv_tfm)) {
			crypt_iv_essiv_dtr(cc);
			return PTR_ERR(essiv_tfm);
		}
		per_cpu_ptr(cc->cpu, cpu)->iv_private = essiv_tfm;
	}

	return 0;

bad:
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return err;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private;

	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(dmreq->iv_sector);
	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);

	return 0;
}

static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cipher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cipher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}
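
/*
 * Worked example (added for illustration): for a 16-byte cipher block,
 * log = ilog2(16) = 4 and shift = 9 - 4 = 5, so each 512-byte sector
 * advances the benbi block count by 1 << 5 = 32 blocks (512 / 16).
 */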

static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
			      struct dm_crypt_request *dmreq)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is set below */

	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
			     struct dm_crypt_request *dmreq)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.init      = crypt_iv_essiv_init,
	.wipe      = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr       = crypt_iv_benbi_ctr,
	.dtr       = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     struct ablkcipher_request *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
					       struct dm_crypt_request *dmreq)
{
	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}

static u8 *iv_of_dmreq(struct crypt_config *cc,
		       struct dm_crypt_request *dmreq)
{
	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
}
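
/*
 * Note (added for clarity): the three helpers above navigate a single
 * mempool allocation laid out as described in struct crypt_config:
 * the ablkcipher_request starts at offset 0, struct dm_crypt_request
 * at offset dmreq_start, and the IV is aligned just past the dmreq.
 */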

static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r = 0;

	dmreq = dmreq_of_req(cc, req);
	iv = iv_of_dmreq(cc, dmreq);

	dmreq->iv_sector = ctx->sector;
	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
		    bv_in->bv_offset + ctx->offset_in);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
		    bv_out->bv_offset + ctx->offset_out);

	ctx->offset_in += 1 << SECTOR_SHIFT;
	if (ctx->offset_in >= bv_in->bv_len) {
		ctx->offset_in = 0;
		ctx->idx_in++;
	}

	ctx->offset_out += 1 << SECTOR_SHIFT;
	if (ctx->offset_out >= bv_out->bv_len) {
		ctx->offset_out = 0;
		ctx->idx_out++;
	}

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, iv, dmreq);

	return r;
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);

static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	struct crypt_cpu *this_cc = this_crypt_config(cc);
	unsigned key_index = ctx->sector & (cc->tfms_count - 1);

	if (!this_cc->req)
		this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);

	ablkcipher_request_set_tfm(this_cc->req, this_cc->tfms[key_index]);
	ablkcipher_request_set_callback(this_cc->req,
	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
	    kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
}
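
/*
 * Note (added for clarity): with the multi-key format the transform is
 * picked as sector & (tfms_count - 1), i.e. sector modulo tfms_count;
 * crypt_ctr_cipher() enforces that tfms_count is a power of 2, so the
 * mask is equivalent to the modulo.
 */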

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	struct crypt_cpu *this_cc = this_crypt_config(cc);
	int r;

	atomic_set(&ctx->pending, 1);

	while (ctx->idx_in < ctx->bio_in->bi_vcnt &&
	       ctx->idx_out < ctx->bio_out->bi_vcnt) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->pending);

		r = crypt_convert_block(cc, ctx, this_cc->req);

		switch (r) {
		/* async */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			INIT_COMPLETION(ctx->restart);
			/* fall through */
		case -EINPROGRESS:
			this_cc->req = NULL;
			ctx->sector++;
			continue;

		/* sync */
		case 0:
			atomic_dec(&ctx->pending);
			ctx->sector++;
			cond_resched();
			continue;

		/* error */
		default:
			atomic_dec(&ctx->pending);
			return r;
		}
	}

	return 0;
}
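
/*
 * Note (added for clarity): ctx->pending starts at 1 so the context
 * cannot complete while crypt_convert() is still issuing blocks; the
 * count is raised per block, dropped again for synchronous completions,
 * and the caller drops the initial reference when the loop is done.
 */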

static void dm_crypt_bio_destructor(struct bio *bio)
{
	struct dm_crypt_io *io = bio->bi_private;
	struct crypt_config *cc = io->target->private;

	bio_free(bio, cc->bs);
}

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages, indicated by
 * *out_of_pages set to 1.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
				      unsigned *out_of_pages)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);
	*out_of_pages = 0;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			*out_of_pages = 1;
			break;
		}

		/*
		 * if additional pages cannot be allocated without waiting,
		 * return a partially allocated bio, the caller will then try
		 * to allocate additional bios while submitting this partial bio
		 */
		if (i == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}
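
/*
 * Note (added for clarity): the first MIN_BIO_PAGES pages are allocated
 * with a mask that may sleep; from then on __GFP_WAIT is dropped, so a
 * shortage simply yields a partial bio instead of blocking the writer.
 */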

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	for (i = 0; i < clone->bi_vcnt; i++) {
		bv = bio_iovec_idx(clone, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
					  struct bio *bio, sector_t sector)
{
	struct crypt_config *cc = ti->private;
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->base_io = NULL;
	atomic_set(&io->pending, 0);

	return io;
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->pending);
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 * If base_io is set, wait for the last fragment to complete.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct dm_crypt_io *base_io = io->base_io;
	int error = io->error;

	if (!atomic_dec_and_test(&io->pending))
		return;

	mempool_free(io, cc->io_pool);

	if (likely(!base_io))
		bio_endio(base_bio, error);
	else {
		if (error && !base_io->error)
			base_io->error = error;
		crypt_dec_pending(base_io);
	}
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 *
 * The work is done per CPU, globally for all dm-crypt instances.
 * Work items should not depend on each other and do not block.
 */
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
	clone->bi_rw      = io->base_bio->bi_rw;
	clone->bi_destructor = dm_crypt_bio_destructor;
}

static void kcryptd_unplug(struct crypt_config *cc)
{
	blk_unplug(bdev_get_queue(cc->dev->bdev));
}

static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs);
	if (!clone) {
		kcryptd_unplug(cc);
		return 1;
	}

	crypt_inc_pending(io);

	clone_init(io, clone);
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
	clone->bi_sector = cc->start + io->sector;
	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

	generic_make_request(clone);
	return 0;
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;
	generic_make_request(clone);
}

static void kcryptd_io(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ) {
		crypt_inc_pending(io);
		if (kcryptd_io_read(io, GFP_NOIO))
			io->error = -ENOMEM;
		crypt_dec_pending(io);
	} else
		kcryptd_io_write(io);
}

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
					  int error, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->target->private;

	if (unlikely(error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		io->error = -EIO;
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

	clone->bi_sector = cc->start + io->sector;

	if (async)
		kcryptd_queue_io(io);
	else
		generic_make_request(clone);
}

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	struct dm_crypt_io *new_io;
	int crypt_finished;
	unsigned out_of_pages = 0;
	unsigned remaining = io->base_bio->bi_size;
	sector_t sector = io->sector;
	int r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			break;
		}

		io->ctx.bio_out = clone;
		io->ctx.idx_out = 0;

		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		crypt_inc_pending(io);
		r = crypt_convert(cc, &io->ctx);
		crypt_finished = atomic_dec_and_test(&io->ctx.pending);

		/* Encryption was already finished, submit io now */
		if (crypt_finished) {
			kcryptd_crypt_write_io_submit(io, r, 0);

			/*
			 * If there was an error, do not try next fragments.
			 * For async, error is processed in async handler.
			 */
			if (unlikely(r < 0))
				break;

			io->sector = sector;
		}

		/*
		 * Out of memory -> run queues
		 * But don't wait if split was due to the io size restriction
		 */
		if (unlikely(out_of_pages))
			congestion_wait(BLK_RW_ASYNC, HZ/100);

		/*
		 * With async crypto it is unsafe to share the crypto context
		 * between fragments, so switch to a new dm_crypt_io structure.
		 */
		if (unlikely(!crypt_finished && remaining)) {
			new_io = crypt_io_alloc(io->target, io->base_bio,
						sector);
			crypt_inc_pending(new_io);
			crypt_convert_init(cc, &new_io->ctx, NULL,
					   io->base_bio, sector);
			new_io->ctx.idx_in = io->ctx.idx_in;
			new_io->ctx.offset_in = io->ctx.offset_in;

			/*
			 * Fragments after the first use the base_io
			 * pending count.
			 */
			if (!io->base_io)
				new_io->base_io = io;
			else {
				new_io->base_io = io->base_io;
				crypt_inc_pending(io->base_io);
				crypt_dec_pending(io);
			}

			io = new_io;
		}
	}

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error)
{
	if (unlikely(error < 0))
		io->error = -EIO;

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	int r = 0;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);

	if (atomic_dec_and_test(&io->ctx.pending))
		kcryptd_crypt_read_done(io, r);

	crypt_dec_pending(io);
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->target->private;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
		error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);

	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);

	if (!atomic_dec_and_test(&ctx->pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io, error);
	else
		kcryptd_crypt_write_io_submit(io, error, 1);
}

static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}
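
/*
 * Example (added for illustration): crypt_decode_key(key, "00ff", 2)
 * stores {0x00, 0xff}; a non-hex digit makes endp stop short of
 * &buffer[2] and the function returns -EINVAL.
 */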

/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}

static void crypt_free_tfms(struct crypt_config *cc, int cpu)
{
	struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
	unsigned i;

	for (i = 0; i < cc->tfms_count; i++)
		if (cpu_cc->tfms[i] && !IS_ERR(cpu_cc->tfms[i])) {
			crypto_free_ablkcipher(cpu_cc->tfms[i]);
			cpu_cc->tfms[i] = NULL;
		}
}

static int crypt_alloc_tfms(struct crypt_config *cc, int cpu, char *ciphermode)
{
	struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
	unsigned i;
	int err;

	for (i = 0; i < cc->tfms_count; i++) {
		cpu_cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
		if (IS_ERR(cpu_cc->tfms[i])) {
			err = PTR_ERR(cpu_cc->tfms[i]);
			crypt_free_tfms(cc, cpu);
			return err;
		}
	}

	return 0;
}

static int crypt_setkey_allcpus(struct crypt_config *cc)
{
	unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count);
	int cpu, err = 0, i, r;

	for_each_possible_cpu(cpu) {
		for (i = 0; i < cc->tfms_count; i++) {
			r = crypto_ablkcipher_setkey(per_cpu_ptr(cc->cpu, cpu)->tfms[i],
						     cc->key + (i * subkey_size), subkey_size);
			if (r)
				err = r;
		}
	}

	return err;
}
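
/*
 * Worked example (added for illustration): with tfms_count = 2 and a
 * 32-byte combined key, subkey_size = 32 >> ilog2(2) = 16, so tfms[0]
 * is keyed with bytes 0-15 and tfms[1] with bytes 16-31.
 */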

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	/* The key size may not be changed. */
	if (cc->key_size != (strlen(key) >> 1))
		return -EINVAL;

	/* Hyphen (which gives a key_size of zero) means there is no key. */
	if (!cc->key_size && strcmp(key, "-"))
		return -EINVAL;

	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
		return -EINVAL;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	return crypt_setkey_allcpus(cc);
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));

	return crypt_setkey_allcpus(cc);
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;
	struct crypt_cpu *cpu_cc;
	int cpu;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	if (cc->cpu)
		for_each_possible_cpu(cpu) {
			cpu_cc = per_cpu_ptr(cc->cpu, cpu);
			if (cpu_cc->req)
				mempool_free(cpu_cc->req, cc->req_pool);
			crypt_free_tfms(cc, cpu);
		}

	if (cc->bs)
		bioset_free(cc->bs);

	if (cc->page_pool)
		mempool_destroy(cc->page_pool);
	if (cc->req_pool)
		mempool_destroy(cc->req_pool);
	if (cc->io_pool)
		mempool_destroy(cc->io_pool);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	if (cc->cpu)
		free_percpu(cc->cpu);

	kzfree(cc->cipher);
	kzfree(cc->cipher_string);

	/* Must zero key material before freeing */
	kzfree(cc);
}

static int crypt_ctr_cipher(struct dm_target *ti,
			    char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
	char *cipher_api = NULL;
	int cpu, ret = -EINVAL;

	/* Convert to crypto api definition? */
	if (strchr(cipher_in, '(')) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
	if (!cc->cipher_string)
		goto bad_mem;

	/*
	 * Legacy dm-crypt cipher specification
	 * cipher[:keycount]-mode-iv:ivopts
	 */
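	/*
	 * Example (added for illustration): "aes:2-cbc-essiv:sha256"
	 * parses to cipher "aes", keycount "2", chainmode "cbc",
	 * ivmode "essiv", ivopts "sha256", and the crypto API string
	 * built below is "cbc(aes)".
	 */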
1247 tmp = cipher_in;
Milan Brozd1f96422011-01-13 19:59:54 +00001248 keycount = strsep(&tmp, "-");
1249 cipher = strsep(&keycount, ":");
1250
1251 if (!keycount)
1252 cc->tfms_count = 1;
1253 else if (sscanf(keycount, "%u", &cc->tfms_count) != 1 ||
1254 !is_power_of_2(cc->tfms_count)) {
1255 ti->error = "Bad cipher key count specification";
1256 return -EINVAL;
1257 }
1258 cc->key_parts = cc->tfms_count;
Milan Broz5ebaee62010-08-12 04:14:07 +01001259
1260 cc->cipher = kstrdup(cipher, GFP_KERNEL);
1261 if (!cc->cipher)
1262 goto bad_mem;
1263
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264 chainmode = strsep(&tmp, "-");
1265 ivopts = strsep(&tmp, "-");
1266 ivmode = strsep(&ivopts, ":");
1267
1268 if (tmp)
Milan Broz5ebaee62010-08-12 04:14:07 +01001269 DMWARN("Ignoring unexpected additional cipher options");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270
Milan Brozd1f96422011-01-13 19:59:54 +00001271 cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)) +
1272 cc->tfms_count * sizeof(*(cc->cpu->tfms)),
1273 __alignof__(struct crypt_cpu));
Andi Kleenc0297722011-01-13 19:59:53 +00001274 if (!cc->cpu) {
1275 ti->error = "Cannot allocate per cpu state";
1276 goto bad_mem;
1277 }
1278
Milan Broz7dbcd132011-01-13 19:59:52 +00001279 /*
1280 * For compatibility with the original dm-crypt mapping format, if
1281 * only the cipher name is supplied, use cbc-plain.
1282 */
Milan Broz5ebaee62010-08-12 04:14:07 +01001283 if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284 chainmode = "cbc";
1285 ivmode = "plain";
1286 }
1287
Herbert Xud1806f62006-08-22 20:29:17 +10001288 if (strcmp(chainmode, "ecb") && !ivmode) {
Milan Broz5ebaee62010-08-12 04:14:07 +01001289 ti->error = "IV mechanism required";
1290 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291 }
1292
Milan Broz5ebaee62010-08-12 04:14:07 +01001293 cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
1294 if (!cipher_api)
1295 goto bad_mem;
1296
1297 ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
1298 "%s(%s)", chainmode, cipher);
1299 if (ret < 0) {
1300 kfree(cipher_api);
1301 goto bad_mem;
Herbert Xud1806f62006-08-22 20:29:17 +10001302 }

	/* Allocate cipher */
	for_each_possible_cpu(cpu) {
		ret = crypt_alloc_tfms(cc, cpu, cipher_api);
		if (ret < 0) {
			ti->error = "Error allocating crypto tfm";
			goto bad;
		}
	}

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		goto bad;
	}

	/* Initialize IV */
	cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose the IV generator; see the comments above each implementation. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else {
		ret = -EINVAL;
		ti->error = "Invalid IV mode";
		goto bad;
	}

	/* Allocate the IV generator's private context, if it needs one */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			goto bad;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			goto bad;
		}
	}

	ret = 0;
bad:
	kfree(cipher_api);
	return ret;

bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
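/*
 * Example table line (hypothetical device and truncated key, for
 * illustration only), as loaded via "dmsetup create <name> --table ...":
 *
 *   0 409600 crypt aes-cbc-essiv:sha256 c0fe...ba5e 0 /dev/sdb 0
 */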
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	unsigned int key_size;
	unsigned long long tmpll;
	int ret;

	if (argc != 5) {
		ti->error = "Invalid number of arguments";
		return -EINVAL;
	}

	key_size = strlen(argv[1]) >> 1;	/* two hex digits per key byte */

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}
	cc->key_size = key_size;

	ti->private = cc;
	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	ret = -ENOMEM;
	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad;
	}

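	/*
	 * Per-request scratch layout (a sketch; the offsets are computed
	 * below):
	 *
	 *   | ablkcipher_request | tfm request ctx | dm_crypt_request | IV |
	 *
	 * dmreq_start is the aligned offset of the dm_crypt_request part.
	 */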
	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
	cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
			   ~(crypto_tfm_ctx_alignment() - 1);

	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	ret = -ENOMEM;
	cc->io_queue = alloc_workqueue("kcryptd_io",
				       WQ_NON_REENTRANT|
				       WQ_MEM_RECLAIM,
				       1);
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	cc->crypt_queue = alloc_workqueue("kcryptd",
					  WQ_NON_REENTRANT|
					  WQ_CPU_INTENSIVE|
					  WQ_MEM_RECLAIM,
					  1);
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	ti->num_flush_requests = 1;
	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}

static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc;

	if (bio->bi_rw & REQ_FLUSH) {
		cc = ti->private;
		bio->bi_bdev = cc->dev->bdev;
		return DM_MAPIO_REMAPPED;
	}

	io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector));

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, GFP_NOWAIT))
			kcryptd_queue_io(io);
	} else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
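		/*
		 * Emits the same five fields the constructor takes:
		 * <cipher> <key or "-"> <iv_offset> <device> <start>.
		 */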
		DMEMIT("%s ", cc->cipher_string);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
				cc->dev->name, (unsigned long long)cc->start);
		break;
	}
	return 0;
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
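/*
 * Example invocation (illustrative device name; the mapping must be
 * suspended while the key is manipulated):
 *
 *   dmsetup suspend cr0
 *   dmsetup message cr0 0 key set <new key in hex>
 *   dmsetup resume cr0
 */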
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	int ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strnicmp(argv[0], MESG_STR("key"))) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) {
			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			return ret;
		}
		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) {
			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
				ret = cc->iv_gen_ops->wipe(cc);
				if (ret)
					return ret;
			}
			return crypt_wipe_key(cc);
		}
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		       struct bio_vec *biovec, int max_size)
{
	struct crypt_config *cc = ti->private;
	struct request_queue *q = bdev_get_queue(cc->dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cc->dev->bdev;
	bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 10, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.merge  = crypt_merge,
	.iterate_devices = crypt_iterate_devices,
};

static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");