/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include "blk.h"

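/**
 * blk_rq_append_bio - queue a bio on an existing request
 * @q:		request queue the request belongs to
 * @rq:		request to append @bio to
 * @bio:	bio to add
 *
 * Starts @rq with @bio if the request is still empty; otherwise tries a
 * back merge and, if the merge is allowed, chains @bio at the tail and
 * grows the request's data length accordingly.
 */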
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->__data_len += bio->bi_size;
	}
	return 0;
}

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     struct rq_map_data *map_data, void __user *ubuf,
			     unsigned int len, gfp_t gfp_mask)
{
	unsigned long uaddr;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * if alignment requirement is satisfied, map in user pages for
	 * direct dma. else, set up kernel bounce buffers
	 */
	uaddr = (unsigned long) ubuf;
	if (blk_rq_aligned(q, uaddr, len) && !map_data)
		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
	else
		bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio->bi_flags |= (1 << BIO_NULL_MAPPED);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request structure to fill
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @ubuf:	the user buffer
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len)
		return -EINVAL;

	if (!ubuf && (!map_data || !map_data->null_mapped))
		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens we just lower the requested
		 * mapping len by a page so that we can fit
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
					gfp_mask);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;

		if (map_data)
			map_data->offset += ret;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	rq->buffer = NULL;
	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
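
/*
 * Usage sketch (illustrative only, not part of this file), in the style
 * of sg_io() in block/scsi_ioctl.c. The original rq->bio is saved before
 * execution, since completion may change it:
 *
 *	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
 *	struct bio *bio;
 *	int ret;
 *
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (ret) {
 *		blk_put_request(rq);
 *		return ret;
 *	}
 *	bio = rq->bio;
 *	blk_execute_rq(q, NULL, rq, 0);
 *	ret = blk_rq_unmap_user(bio);
 *	blk_put_request(rq);
 */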

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data, struct sg_iovec *iov,
			int iov_count, unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;
	int i, read = rq_data_dir(rq) == READ;
	int unaligned = 0;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;

		if (!iov[i].iov_len)
			return -EINVAL;

		/*
		 * Keep going so we check length of all segments
		 */
		if (uaddr & queue_dma_alignment(q))
			unaligned = 1;
	}

	if (unaligned || (q->dma_pad_mask & len) || map_data)
		bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
					gfp_mask);
	else
		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		/*
		 * Grab an extra reference to this bio, as bio_unmap_user()
		 * expects to be able to drop it twice as it happens on the
		 * normal IO completion path
		 */
		bio_get(bio);
		bio_endio(bio, 0);
		__blk_rq_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
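
/*
 * Usage sketch (illustrative only, not part of this file): the iovec
 * variant takes a scatter-gather list of user buffers, e.g. built from
 * an SG_IO ioctl. Unmapping works exactly as for blk_rq_map_user():
 * save rq->bio before execution and pass it to blk_rq_unmap_user().
 *
 *	struct sg_iovec iov[2] = {
 *		{ .iov_base = buf0, .iov_len = len0 },
 *		{ .iov_base = buf1, .iov_len = len1 },
 *	};
 *	int ret;
 *
 *	ret = blk_rq_map_user_iov(q, rq, NULL, iov, 2, len0 + len1,
 *				  GFP_KERNEL);
 */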

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap a request previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio->bi_rw |= REQ_WRITE;

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	rq->buffer = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
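
/*
 * Usage sketch (illustrative only, not part of this file): mapping a
 * kmalloc'ed buffer for a passthrough command, as callers such as
 * scsi_execute() do. No explicit unmap is needed; the bio is released
 * when the request completes:
 *
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *	int ret;
 *
 *	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
 *	if (!ret)
 *		blk_execute_rq(q, NULL, rq, 0);
 *	kfree(buf);
 */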