/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q:		queue
 * @pfn:	prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O; it can be used, for
 * instance, to build a CDB from the request data.
 *
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
	q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);
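
/*
 * Example (illustrative sketch, not part of this file): a driver wiring up a
 * prepare_request callback at queue setup time.  The mydrv_* names are
 * hypothetical; the prep_rq_fn signature and the BLKPREP_* return codes are
 * assumed to match <linux/blkdev.h> of this kernel generation.
 *
 *	static int mydrv_prep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		if (!mydrv_build_cdb(rq))	hypothetical CDB builder
 *			return BLKPREP_KILL;	fail the request outright
 *		return BLKPREP_OK;		ready for the request_fn
 *	}
 *
 *	blk_queue_prep_rq(q, mydrv_prep_rq);
 */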

/**
 * blk_queue_set_discard - set a prepare_discard function for queue
 * @q:		queue
 * @dfn:	prepare_discard function
 *
 * It's possible for a queue to register a discard callback which is used
 * to transform a discard request into the appropriate type for the
 * hardware. If none is registered, then discard requests are failed
 * with %EOPNOTSUPP.
 *
 */
void blk_queue_set_discard(struct request_queue *q, prepare_discard_fn *dfn)
{
	q->prepare_discard_fn = dfn;
}
EXPORT_SYMBOL(blk_queue_set_discard);
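
/*
 * Example (illustrative sketch, not part of this file): a driver that can
 * honour discards registers a callback that rewrites the request into its
 * native trim/discard command.  mydrv_build_trim() is hypothetical; only the
 * prepare_discard_fn signature is assumed from this kernel generation.
 *
 *	static int mydrv_prepare_discard(struct request_queue *q,
 *					 struct request *rq)
 *	{
 *		return mydrv_build_trim(rq);	0 on success
 *	}
 *
 *	blk_queue_set_discard(q, mydrv_prepare_discard);
 */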

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q:		queue
 * @mbfn:	merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bios sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bios. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
	q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);
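
/*
 * Example (illustrative sketch, not part of this file): a striping driver
 * that must not let a bio cross a fixed chunk boundary.  MYDRV_CHUNK_SECTORS
 * is a hypothetical power-of-two chunk size in sectors; the merge_bvec_fn
 * signature (taking a struct bvec_merge_data) is assumed to match this
 * kernel generation.
 *
 *	static int mydrv_merge_bvec(struct request_queue *q,
 *				    struct bvec_merge_data *bvm,
 *				    struct bio_vec *bvec)
 *	{
 *		unsigned int chunk_left = (MYDRV_CHUNK_SECTORS -
 *			(bvm->bi_sector & (MYDRV_CHUNK_SECTORS - 1))) << 9;
 *		int max = chunk_left - bvm->bi_size;
 *
 *		if (max < 0)
 *			max = 0;
 *		if (max <= bvec->bv_len && bvm->bi_size == 0)
 *			return bvec->bv_len;	an empty bio always gets one page
 *		return max;
 *	}
 *
 *	blk_queue_merge_bvec(q, mydrv_merge_bvec);
 */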

void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
	q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
	q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
	q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
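
/*
 * Example (illustrative sketch, not part of this file): a request-based
 * driver wiring up the timeout and lld_busy hooks above.  The 30 second
 * timeout and the mydrv_* helpers are hypothetical; the BLK_EH_* return
 * codes are assumed to match this kernel generation.
 *
 *	static enum blk_eh_timer_return mydrv_timed_out(struct request *rq)
 *	{
 *		if (mydrv_abort(rq))		driver completed the request
 *			return BLK_EH_HANDLED;
 *		return BLK_EH_RESET_TIMER;	still in flight, rearm the timer
 *	}
 *
 *	static int mydrv_lld_busy(struct request_queue *q)
 *	{
 *		return mydrv_controller_busy(q->queuedata);
 *	}
 *
 *	blk_queue_rq_timeout(q, 30 * HZ);
 *	blk_queue_rq_timed_out(q, mydrv_timed_out);
 *	blk_queue_lld_busy(q, mydrv_lld_busy);
 */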

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
	/*
	 * set defaults
	 */
	q->nr_requests = BLKDEV_MAX_RQ;
	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
	q->make_request_fn = mfn;
	q->backing_dev_info.ra_pages =
			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
	q->backing_dev_info.state = 0;
	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
	blk_queue_hardsect_size(q, 512);
	blk_queue_dma_alignment(q, 511);
	blk_queue_congestion_threshold(q);
	q->nr_batching = BLK_BATCH_REQ;

	q->unplug_thresh = 4;		/* hmm */
	q->unplug_delay = (3 * HZ) / 1000;	/* 3 milliseconds */
	if (q->unplug_delay == 0)
		q->unplug_delay = 1;

	q->unplug_timer.function = blk_unplug_timeout;
	q->unplug_timer.data = (unsigned long)q;

	/*
	 * by default assume old behaviour and bounce for any highmem page
	 */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
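
/*
 * Example (illustrative sketch, not part of this file): a bio-based virtual
 * driver (md/lvm style) taking bios directly instead of using a request_fn.
 * blk_alloc_queue() is a real interface of this kernel generation; the
 * mydrv_* pieces are hypothetical.
 *
 *	static int mydrv_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		struct mydrv *dev = q->queuedata;
 *
 *		mydrv_map_and_submit(dev, bio);	remap and pass down, or complete
 *		return 0;
 *	}
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	blk_queue_make_request(q, mydrv_make_request);
 */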

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q:  the request queue for the device
 * @dma_addr:   bus address limit
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @dma_addr.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
{
	unsigned long b_pfn = dma_addr >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/* Assume anything <= 4GB can be handled by IOMMU.
	   Actually some IOMMUs can handle everything, but I don't
	   know of a way to test this here. */
	if (b_pfn < (min_t(u64, 0x100000000UL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->bounce_pfn = max_low_pfn;
#else
	if (b_pfn < blk_max_low_pfn)
		dma = 1;
	q->bounce_pfn = b_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->bounce_pfn = b_pfn;
	}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
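
/*
 * Example (illustrative sketch, not part of this file): a controller that can
 * only DMA to the low 4GB asks for bouncing above that boundary, while a
 * fully 64-bit capable device can pass BLK_BOUNCE_ANY to avoid bouncing.
 *
 *	blk_queue_bounce_limit(q, 0xffffffffULL);
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 */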

/**
 * blk_queue_max_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_sectors:  max sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of
 *    received requests.
 **/
void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
{
	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_sectors);
	}

	if (BLK_DEF_MAX_SECTORS > max_sectors)
		q->max_hw_sectors = q->max_sectors = max_sectors;
	else {
		q->max_sectors = BLK_DEF_MAX_SECTORS;
		q->max_hw_sectors = max_sectors;
	}
}
EXPORT_SYMBOL(blk_queue_max_sectors);
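
/*
 * Example (illustrative sketch, not part of this file): a controller limited
 * to 128KB per transfer caps requests at 256 sectors of 512 bytes.  The
 * number is a placeholder for the real hardware limit.
 *
 *	blk_queue_max_sectors(q, 256);
 */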

/**
 * blk_queue_max_phys_segments - set max phys segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    physical data segments in a request.  This would be the largest sized
 *    scatter list the driver could handle.
 **/
void blk_queue_max_phys_segments(struct request_queue *q,
				 unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->max_phys_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_phys_segments);

/**
 * blk_queue_max_hw_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.  This would be the largest number of
 *    address/length pairs the host adapter can actually give at once
 *    to the device.
 **/
void blk_queue_max_hw_segments(struct request_queue *q,
			       unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->max_hw_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_hw_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_CACHE_SIZE) {
		max_size = PAGE_CACHE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	q->max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);
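
/*
 * Example (illustrative sketch, not part of this file): a driver whose
 * scatter/gather table holds 64 entries of at most 64KB each would pair the
 * three limits above.  The numbers are placeholders for the real hardware
 * capabilities.
 *
 *	blk_queue_max_phys_segments(q, 64);
 *	blk_queue_max_hw_segments(q, 64);
 *	blk_queue_max_segment_size(q, 64 * 1024);
 */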

/**
 * blk_queue_hardsect_size - set hardware sector size for the queue
 * @q:  the request queue for the device
 * @size:  the hardware sector size, in bytes
 *
 * Description:
 *   This should typically be set to the lowest possible sector size
 *   that the hardware can operate on (possibly without even resorting
 *   to internal read-modify-write operations). Usually the default
 *   of 512 covers most hardware.
 **/
void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
{
	q->hardsect_size = size;
}
EXPORT_SYMBOL(blk_queue_hardsect_size);
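
/*
 * Example (illustrative sketch, not part of this file): an optical drive with
 * 2KB sectors advertises its hardware sector size at probe time.
 *
 *	blk_queue_hardsect_size(q, 2048);
 */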

/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:	the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
	/* zero is "infinity" */
	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);

	t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments);
	t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
	t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
	t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
	if (!t->queue_lock)
		WARN_ON_ONCE(1);
	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
		unsigned long flags;
		spin_lock_irqsave(t->queue_lock, flags);
		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
		spin_unlock_irqrestore(t->queue_lock, flags);
	}
}
EXPORT_SYMBOL(blk_queue_stack_limits);
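
/*
 * Example (illustrative sketch, not part of this file): a stacking driver
 * (dm/md style) folds a newly added component device's limits into its own
 * queue.  bdev_get_queue() is a real helper; the mydrv naming is
 * hypothetical.
 *
 *	static void mydrv_add_component(struct request_queue *top,
 *					struct block_device *bdev)
 *	{
 *		blk_queue_stack_limits(top, bdev_get_queue(bdev));
 *	}
 */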

/**
 * blk_queue_dma_pad - set pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Set dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
	q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:	physically contiguous buffer
 * @size:	size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for
 * appending the drain buffer.  If you call
 * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
 * calling this routine, you must set the limit to one fewer than your
 * device can support otherwise there won't be room for the drain
 * buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size)
{
	if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
		return -EINVAL;
	/* make room for appending the drain */
	--q->max_hw_segments;
	--q->max_phys_segments;
	q->dma_drain_needed = dma_drain_needed;
	q->dma_drain_buffer = buf;
	q->dma_drain_size = size;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
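
/*
 * Example (illustrative sketch, not part of this file): an ATAPI-style driver
 * reserving a drain buffer for packet commands that may transfer more data
 * than asked for.  MYDRV_DRAIN_SIZE and the "drain every packet command"
 * policy are hypothetical; kmalloc() and blk_pc_request() are real
 * interfaces of this kernel generation.
 *
 *	static int mydrv_drain_needed(struct request *rq)
 *	{
 *		return blk_pc_request(rq);
 *	}
 *
 *	buf = kmalloc(MYDRV_DRAIN_SIZE, GFP_KERNEL);
 *	if (buf)
 *		blk_queue_dma_drain(q, mydrv_drain_needed, buf,
 *				    MYDRV_DRAIN_SIZE);
 */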

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_CACHE_SIZE - 1) {
		mask = PAGE_CACHE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);
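
/*
 * Example (illustrative sketch, not part of this file): hardware that cannot
 * DMA across a 64KB boundary passes the matching mask.
 *
 *	blk_queue_segment_boundary(q, 0xffff);
 */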

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    set required memory and length alignment for direct dma transactions.
 *    this is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
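
/*
 * Example (illustrative sketch, not part of this file): a host driver needing
 * 4-byte aligned buffers for direct I/O sets a mask of 3, and a transport
 * later raising the requirement to 512-byte alignment updates it; the larger
 * mask wins.
 *
 *	blk_queue_dma_alignment(q, 3);
 *	blk_queue_update_dma_alignment(q, 511);
 */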

static int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;
	return 0;
}
subsys_initcall(blk_settings_init);