| Dmitry Monakhov | f31e7e4 | 2010-04-28 17:55:08 +0400 | [diff] [blame] | 1 | /* | 
 | 2 |  * Functions related to generic helpers functions | 
 | 3 |  */ | 
 | 4 | #include <linux/kernel.h> | 
 | 5 | #include <linux/module.h> | 
 | 6 | #include <linux/bio.h> | 
 | 7 | #include <linux/blkdev.h> | 
 | 8 | #include <linux/scatterlist.h> | 
 | 9 |  | 
 | 10 | #include "blk.h" | 
 | 11 |  | 
| Lukas Czerner | 5dba308 | 2011-05-06 19:26:27 -0600 | [diff] [blame] | 12 | struct bio_batch { | 
 | 13 | 	atomic_t		done; | 
 | 14 | 	unsigned long		flags; | 
 | 15 | 	struct completion	*wait; | 
 | 16 | }; | 
 | 17 |  | 
/*
 * Per-bio completion callback for a bio_batch.
 *
 * Any error except -EOPNOTSUPP clears BIO_UPTODATE in the batch flags,
 * which the submitter later translates into -EIO. -EOPNOTSUPP is
 * deliberately ignored -- presumably so that a device rejecting the
 * request type is not reported as a data error (NOTE(review): confirm
 * this is the intended policy for all callers).
 *
 * The atomic_dec_and_test() pairs with the submitter's initial
 * atomic_set(&bb->done, 1) and per-bio atomic_inc(): only the final
 * reference (last completing bio, or the submitter having finished
 * issuing) wakes the waiter.
 */
static void bio_batch_end_io(struct bio *bio, int err)
{
	struct bio_batch *bb = bio->bi_private;

	if (err && (err != -EOPNOTSUPP))
		clear_bit(BIO_UPTODATE, &bb->flags);
	if (atomic_dec_and_test(&bb->done))
		complete(bb->wait);
	bio_put(bio);	/* drop the reference taken at bio_alloc() time */
}
 | 28 |  | 
 | 29 | /** | 
 | 30 |  * blkdev_issue_discard - queue a discard | 
 | 31 |  * @bdev:	blockdev to issue discard for | 
 | 32 |  * @sector:	start sector | 
 | 33 |  * @nr_sects:	number of sectors to discard | 
 | 34 |  * @gfp_mask:	memory allocation flags (for bio_alloc) | 
 | 35 |  * @flags:	BLKDEV_IFL_* flags to control behaviour | 
 | 36 |  * | 
 | 37 |  * Description: | 
 | 38 |  *    Issue a discard request for the sectors in question. | 
 | 39 |  */ | 
 | 40 | int blkdev_issue_discard(struct block_device *bdev, sector_t sector, | 
 | 41 | 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags) | 
 | 42 | { | 
 | 43 | 	DECLARE_COMPLETION_ONSTACK(wait); | 
 | 44 | 	struct request_queue *q = bdev_get_queue(bdev); | 
| Christoph Hellwig | 8c55536 | 2010-08-18 05:29:22 -0400 | [diff] [blame] | 45 | 	int type = REQ_WRITE | REQ_DISCARD; | 
| Jens Axboe | 10d1f9e | 2010-07-15 10:49:31 -0600 | [diff] [blame] | 46 | 	unsigned int max_discard_sectors; | 
| Lukas Czerner | 5dba308 | 2011-05-06 19:26:27 -0600 | [diff] [blame] | 47 | 	struct bio_batch bb; | 
| Dmitry Monakhov | f31e7e4 | 2010-04-28 17:55:08 +0400 | [diff] [blame] | 48 | 	struct bio *bio; | 
| Dmitry Monakhov | f31e7e4 | 2010-04-28 17:55:08 +0400 | [diff] [blame] | 49 | 	int ret = 0; | 
 | 50 |  | 
 | 51 | 	if (!q) | 
 | 52 | 		return -ENXIO; | 
 | 53 |  | 
 | 54 | 	if (!blk_queue_discard(q)) | 
 | 55 | 		return -EOPNOTSUPP; | 
 | 56 |  | 
| Jens Axboe | 10d1f9e | 2010-07-15 10:49:31 -0600 | [diff] [blame] | 57 | 	/* | 
 | 58 | 	 * Ensure that max_discard_sectors is of the proper | 
 | 59 | 	 * granularity | 
 | 60 | 	 */ | 
 | 61 | 	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9); | 
 | 62 | 	if (q->limits.discard_granularity) { | 
 | 63 | 		unsigned int disc_sects = q->limits.discard_granularity >> 9; | 
| Dmitry Monakhov | f31e7e4 | 2010-04-28 17:55:08 +0400 | [diff] [blame] | 64 |  | 
| Jens Axboe | 10d1f9e | 2010-07-15 10:49:31 -0600 | [diff] [blame] | 65 | 		max_discard_sectors &= ~(disc_sects - 1); | 
 | 66 | 	} | 
 | 67 |  | 
| Christoph Hellwig | dd3932e | 2010-09-16 20:51:46 +0200 | [diff] [blame] | 68 | 	if (flags & BLKDEV_DISCARD_SECURE) { | 
| Adrian Hunter | 8d57a98 | 2010-08-11 14:17:49 -0700 | [diff] [blame] | 69 | 		if (!blk_queue_secdiscard(q)) | 
 | 70 | 			return -EOPNOTSUPP; | 
| Christoph Hellwig | 8c55536 | 2010-08-18 05:29:22 -0400 | [diff] [blame] | 71 | 		type |= REQ_SECURE; | 
| Adrian Hunter | 8d57a98 | 2010-08-11 14:17:49 -0700 | [diff] [blame] | 72 | 	} | 
 | 73 |  | 
| Lukas Czerner | 5dba308 | 2011-05-06 19:26:27 -0600 | [diff] [blame] | 74 | 	atomic_set(&bb.done, 1); | 
 | 75 | 	bb.flags = 1 << BIO_UPTODATE; | 
 | 76 | 	bb.wait = &wait; | 
 | 77 |  | 
 | 78 | 	while (nr_sects) { | 
| Dmitry Monakhov | f31e7e4 | 2010-04-28 17:55:08 +0400 | [diff] [blame] | 79 | 		bio = bio_alloc(gfp_mask, 1); | 
| Christoph Hellwig | 66ac028 | 2010-06-18 16:59:42 +0200 | [diff] [blame] | 80 | 		if (!bio) { | 
 | 81 | 			ret = -ENOMEM; | 
 | 82 | 			break; | 
 | 83 | 		} | 
 | 84 |  | 
| Dmitry Monakhov | f31e7e4 | 2010-04-28 17:55:08 +0400 | [diff] [blame] | 85 | 		bio->bi_sector = sector; | 
| Lukas Czerner | 5dba308 | 2011-05-06 19:26:27 -0600 | [diff] [blame] | 86 | 		bio->bi_end_io = bio_batch_end_io; | 
| Dmitry Monakhov | f31e7e4 | 2010-04-28 17:55:08 +0400 | [diff] [blame] | 87 | 		bio->bi_bdev = bdev; | 
| Lukas Czerner | 5dba308 | 2011-05-06 19:26:27 -0600 | [diff] [blame] | 88 | 		bio->bi_private = &bb; | 
| Dmitry Monakhov | f31e7e4 | 2010-04-28 17:55:08 +0400 | [diff] [blame] | 89 |  | 
| Dmitry Monakhov | f31e7e4 | 2010-04-28 17:55:08 +0400 | [diff] [blame] | 90 | 		if (nr_sects > max_discard_sectors) { | 
 | 91 | 			bio->bi_size = max_discard_sectors << 9; | 
 | 92 | 			nr_sects -= max_discard_sectors; | 
 | 93 | 			sector += max_discard_sectors; | 
 | 94 | 		} else { | 
 | 95 | 			bio->bi_size = nr_sects << 9; | 
 | 96 | 			nr_sects = 0; | 
 | 97 | 		} | 
 | 98 |  | 
| Lukas Czerner | 5dba308 | 2011-05-06 19:26:27 -0600 | [diff] [blame] | 99 | 		atomic_inc(&bb.done); | 
| Dmitry Monakhov | f31e7e4 | 2010-04-28 17:55:08 +0400 | [diff] [blame] | 100 | 		submit_bio(type, bio); | 
| Lukas Czerner | 5dba308 | 2011-05-06 19:26:27 -0600 | [diff] [blame] | 101 | 	} | 
| Dmitry Monakhov | f31e7e4 | 2010-04-28 17:55:08 +0400 | [diff] [blame] | 102 |  | 
| Lukas Czerner | 5dba308 | 2011-05-06 19:26:27 -0600 | [diff] [blame] | 103 | 	/* Wait for bios in-flight */ | 
 | 104 | 	if (!atomic_dec_and_test(&bb.done)) | 
| Christoph Hellwig | dd3932e | 2010-09-16 20:51:46 +0200 | [diff] [blame] | 105 | 		wait_for_completion(&wait); | 
| Dmitry Monakhov | f31e7e4 | 2010-04-28 17:55:08 +0400 | [diff] [blame] | 106 |  | 
| Lukas Czerner | 8af1954 | 2011-05-06 19:30:01 -0600 | [diff] [blame] | 107 | 	if (!test_bit(BIO_UPTODATE, &bb.flags)) | 
| Lukas Czerner | 5dba308 | 2011-05-06 19:26:27 -0600 | [diff] [blame] | 108 | 		ret = -EIO; | 
| Christoph Hellwig | 66ac028 | 2010-06-18 16:59:42 +0200 | [diff] [blame] | 109 |  | 
| Dmitry Monakhov | f31e7e4 | 2010-04-28 17:55:08 +0400 | [diff] [blame] | 110 | 	return ret; | 
| Dmitry Monakhov | f31e7e4 | 2010-04-28 17:55:08 +0400 | [diff] [blame] | 111 | } | 
 | 112 | EXPORT_SYMBOL(blkdev_issue_discard); | 
| Dmitry Monakhov | 3f14d79 | 2010-04-28 17:55:09 +0400 | [diff] [blame] | 113 |  | 
| Dmitry Monakhov | 3f14d79 | 2010-04-28 17:55:09 +0400 | [diff] [blame] | 114 | /** | 
| Ben Hutchings | 291d24f | 2011-03-01 13:45:24 -0500 | [diff] [blame] | 115 |  * blkdev_issue_zeroout - generate number of zero filed write bios | 
| Dmitry Monakhov | 3f14d79 | 2010-04-28 17:55:09 +0400 | [diff] [blame] | 116 |  * @bdev:	blockdev to issue | 
 | 117 |  * @sector:	start sector | 
 | 118 |  * @nr_sects:	number of sectors to write | 
 | 119 |  * @gfp_mask:	memory allocation flags (for bio_alloc) | 
| Dmitry Monakhov | 3f14d79 | 2010-04-28 17:55:09 +0400 | [diff] [blame] | 120 |  * | 
 | 121 |  * Description: | 
 | 122 |  *  Generate and issue number of bios with zerofiled pages. | 
| Dmitry Monakhov | 3f14d79 | 2010-04-28 17:55:09 +0400 | [diff] [blame] | 123 |  */ | 
 | 124 |  | 
 | 125 | int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, | 
| Christoph Hellwig | dd3932e | 2010-09-16 20:51:46 +0200 | [diff] [blame] | 126 | 			sector_t nr_sects, gfp_t gfp_mask) | 
| Dmitry Monakhov | 3f14d79 | 2010-04-28 17:55:09 +0400 | [diff] [blame] | 127 | { | 
| Dmitry Monakhov | 18edc8e | 2010-08-06 13:23:25 +0200 | [diff] [blame] | 128 | 	int ret; | 
| Dmitry Monakhov | 3f14d79 | 2010-04-28 17:55:09 +0400 | [diff] [blame] | 129 | 	struct bio *bio; | 
 | 130 | 	struct bio_batch bb; | 
| Lukas Czerner | 0aeea18 | 2011-03-11 10:23:53 +0100 | [diff] [blame] | 131 | 	unsigned int sz; | 
| Dmitry Monakhov | 3f14d79 | 2010-04-28 17:55:09 +0400 | [diff] [blame] | 132 | 	DECLARE_COMPLETION_ONSTACK(wait); | 
 | 133 |  | 
| Lukas Czerner | 0aeea18 | 2011-03-11 10:23:53 +0100 | [diff] [blame] | 134 | 	atomic_set(&bb.done, 1); | 
| Dmitry Monakhov | 3f14d79 | 2010-04-28 17:55:09 +0400 | [diff] [blame] | 135 | 	bb.flags = 1 << BIO_UPTODATE; | 
 | 136 | 	bb.wait = &wait; | 
| Dmitry Monakhov | 3f14d79 | 2010-04-28 17:55:09 +0400 | [diff] [blame] | 137 |  | 
| Dmitry Monakhov | 18edc8e | 2010-08-06 13:23:25 +0200 | [diff] [blame] | 138 | 	ret = 0; | 
| Dmitry Monakhov | 3f14d79 | 2010-04-28 17:55:09 +0400 | [diff] [blame] | 139 | 	while (nr_sects != 0) { | 
 | 140 | 		bio = bio_alloc(gfp_mask, | 
 | 141 | 				min(nr_sects, (sector_t)BIO_MAX_PAGES)); | 
| Dmitry Monakhov | 18edc8e | 2010-08-06 13:23:25 +0200 | [diff] [blame] | 142 | 		if (!bio) { | 
 | 143 | 			ret = -ENOMEM; | 
| Dmitry Monakhov | 3f14d79 | 2010-04-28 17:55:09 +0400 | [diff] [blame] | 144 | 			break; | 
| Dmitry Monakhov | 18edc8e | 2010-08-06 13:23:25 +0200 | [diff] [blame] | 145 | 		} | 
| Dmitry Monakhov | 3f14d79 | 2010-04-28 17:55:09 +0400 | [diff] [blame] | 146 |  | 
 | 147 | 		bio->bi_sector = sector; | 
 | 148 | 		bio->bi_bdev   = bdev; | 
 | 149 | 		bio->bi_end_io = bio_batch_end_io; | 
| Christoph Hellwig | dd3932e | 2010-09-16 20:51:46 +0200 | [diff] [blame] | 150 | 		bio->bi_private = &bb; | 
| Dmitry Monakhov | 3f14d79 | 2010-04-28 17:55:09 +0400 | [diff] [blame] | 151 |  | 
| Jens Axboe | 0341aaf | 2010-04-29 09:28:21 +0200 | [diff] [blame] | 152 | 		while (nr_sects != 0) { | 
 | 153 | 			sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects); | 
| Dmitry Monakhov | 3f14d79 | 2010-04-28 17:55:09 +0400 | [diff] [blame] | 154 | 			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0); | 
 | 155 | 			nr_sects -= ret >> 9; | 
 | 156 | 			sector += ret >> 9; | 
 | 157 | 			if (ret < (sz << 9)) | 
 | 158 | 				break; | 
 | 159 | 		} | 
| Dmitry Monakhov | 18edc8e | 2010-08-06 13:23:25 +0200 | [diff] [blame] | 160 | 		ret = 0; | 
| Lukas Czerner | 0aeea18 | 2011-03-11 10:23:53 +0100 | [diff] [blame] | 161 | 		atomic_inc(&bb.done); | 
| Dmitry Monakhov | 3f14d79 | 2010-04-28 17:55:09 +0400 | [diff] [blame] | 162 | 		submit_bio(WRITE, bio); | 
 | 163 | 	} | 
| Dmitry Monakhov | 3f14d79 | 2010-04-28 17:55:09 +0400 | [diff] [blame] | 164 |  | 
| Christoph Hellwig | dd3932e | 2010-09-16 20:51:46 +0200 | [diff] [blame] | 165 | 	/* Wait for bios in-flight */ | 
| Lukas Czerner | 0aeea18 | 2011-03-11 10:23:53 +0100 | [diff] [blame] | 166 | 	if (!atomic_dec_and_test(&bb.done)) | 
| Christoph Hellwig | dd3932e | 2010-09-16 20:51:46 +0200 | [diff] [blame] | 167 | 		wait_for_completion(&wait); | 
| Dmitry Monakhov | 3f14d79 | 2010-04-28 17:55:09 +0400 | [diff] [blame] | 168 |  | 
 | 169 | 	if (!test_bit(BIO_UPTODATE, &bb.flags)) | 
 | 170 | 		/* One of bios in the batch was completed with error.*/ | 
 | 171 | 		ret = -EIO; | 
 | 172 |  | 
| Dmitry Monakhov | 3f14d79 | 2010-04-28 17:55:09 +0400 | [diff] [blame] | 173 | 	return ret; | 
 | 174 | } | 
 | 175 | EXPORT_SYMBOL(blkdev_issue_zeroout); |