#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#ifdef CONFIG_BLOCK

#include <linux/sched.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/bsg.h>

#include <asm/scatterlist.h>

struct scsi_ioctl_command;

struct request_queue;
typedef struct request_queue request_queue_t __deprecated;
struct elevator_queue;
typedef struct elevator_queue elevator_t;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/*
 * This is the per-process anticipatory I/O scheduler state.
 */
struct as_io_context {
	spinlock_t lock;

	void (*dtor)(struct as_io_context *aic); /* destructor */
	void (*exit)(struct as_io_context *aic); /* called on task exit */

	unsigned long state;
	atomic_t nr_queued; /* queued reads & sync writes */
	atomic_t nr_dispatched; /* number of requests gone to the drivers */

	/* IO History tracking */
	/* Thinktime */
	unsigned long last_end_request;
	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;
	/* Layout pattern */
	unsigned int seek_samples;
	sector_t last_request_pos;
	u64 seek_total;
	sector_t seek_mean;
};

struct cfq_queue;
struct cfq_io_context {
	struct rb_node rb_node;
	void *key;

	struct cfq_queue *cfqq[2];

	struct io_context *ioc;

	unsigned long last_end_request;
	sector_t last_request_pos;

	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;

	unsigned int seek_samples;
	u64 seek_total;
	sector_t seek_mean;

	struct list_head queue_list;

	void (*dtor)(struct io_context *); /* destructor */
	void (*exit)(struct io_context *); /* called on task exit */
};

/*
 * This is the per-process I/O subsystem state.  It is refcounted and
 * kmalloc'ed. Currently all fields are modified in process I/O context
 * (apart from the atomic refcount), so they require no locking.
 */
struct io_context {
	atomic_t refcount;
	struct task_struct *task;

	unsigned int ioprio_changed;

	/*
	 * For request batching
	 */
	unsigned long last_waited; /* Time last woken after wait for request */
	int nr_batch_requests;     /* Number of requests left in the batch */

	struct as_io_context *aic;
	struct rb_root cic_root;
	void *ioc_data;
};

void put_io_context(struct io_context *ioc);
void exit_io_context(void);
struct io_context *get_io_context(gfp_t gfp_flags, int node);
void copy_io_context(struct io_context **pdst, struct io_context **psrc);
void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
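
/*
 * Illustrative sketch (usage only, not a definition from this file): code
 * that wants a reference to the current task's io_context might do:
 *
 *	struct io_context *ioc = get_io_context(GFP_KERNEL, -1);
 *
 *	if (ioc) {
 *		... inspect or update per-process I/O state ...
 *		put_io_context(ioc);
 *	}
 *
 * get_io_context() returns with the refcount elevated; put_io_context()
 * drops it and frees the context when the last reference goes away.
 */
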
struct request;
typedef void (rq_end_io_fn)(struct request *, int);

struct request_list {
	int count[2];
	int starved[2];
	int elvpriv;
	mempool_t *rq_pool;
	wait_queue_head_t wait[2];
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_SENSE,			/* sense request */
	REQ_TYPE_PM_SUSPEND,		/* suspend request */
	REQ_TYPE_PM_RESUME,		/* resume request */
	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
	REQ_TYPE_FLUSH,			/* flush request */
	REQ_TYPE_SPECIAL,		/* driver defined type */
	REQ_TYPE_LINUX_BLOCK,		/* generic block layer message */
	/*
	 * For ATA/ATAPI devices. This really doesn't belong here; ide should
	 * use REQ_TYPE_SPECIAL and rq->cmd[0] with the range of driver
	 * private REQ_LB opcodes to differentiate what type of request this is.
	 */
	REQ_TYPE_ATA_CMD,
	REQ_TYPE_ATA_TASK,
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};

/*
 * For requests of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
 * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
 * SCSI cdb).
 *
 * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need,
 * typically to differentiate REQ_TYPE_SPECIAL requests.
 */
enum {
	/*
	 * just examples for now
	 */
	REQ_LB_OP_EJECT	= 0x40,		/* eject request */
	REQ_LB_OP_FLUSH = 0x41,		/* flush device */
};
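
/*
 * Illustrative sketch: a driver's request_fn can dispatch these block
 * layer messages alongside normal fs requests. The exdrv_* names are
 * made up for the example; the overall shape follows existing drivers:
 *
 *	static void exdrv_request(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = elv_next_request(q)) != NULL) {
 *			if (blk_fs_request(rq))
 *				exdrv_do_rw(rq);
 *			else if (rq->cmd_type == REQ_TYPE_LINUX_BLOCK &&
 *				 rq->cmd[0] == REQ_LB_OP_FLUSH)
 *				exdrv_do_flush(rq);
 *			else
 *				end_request(rq, 0);
 *		}
 *	}
 */
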
/*
 * request type modifier bits. The first three bits match the BIO_RW* bits,
 * which is important.
 */
enum rq_flag_bits {
	__REQ_RW,		/* not set, read. set, write */
	__REQ_FAILFAST,		/* no low level driver retries */
	__REQ_SORTED,		/* elevator knows about this request */
	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
	__REQ_HARDBARRIER,	/* may not be passed by drive either */
	__REQ_FUA,		/* forced unit access */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_STARTED,		/* drive already may have started this one */
	__REQ_DONTPREP,		/* don't call prep for this one */
	__REQ_QUEUED,		/* uses queueing */
	__REQ_ELVPRIV,		/* elevator private data attached */
	__REQ_FAILED,		/* set if the request failed */
	__REQ_QUIET,		/* don't worry about errors */
	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
	__REQ_ORDERED_COLOR,	/* is before or after barrier */
	__REQ_RW_SYNC,		/* request is sync (O_DIRECT) */
	__REQ_ALLOCED,		/* request came from our alloc pool */
	__REQ_RW_META,		/* metadata io request */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_RW		(1 << __REQ_RW)
#define REQ_FAILFAST	(1 << __REQ_FAILFAST)
#define REQ_SORTED	(1 << __REQ_SORTED)
#define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
#define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
#define REQ_FUA		(1 << __REQ_FUA)
#define REQ_NOMERGE	(1 << __REQ_NOMERGE)
#define REQ_STARTED	(1 << __REQ_STARTED)
#define REQ_DONTPREP	(1 << __REQ_DONTPREP)
#define REQ_QUEUED	(1 << __REQ_QUEUED)
#define REQ_ELVPRIV	(1 << __REQ_ELVPRIV)
#define REQ_FAILED	(1 << __REQ_FAILED)
#define REQ_QUIET	(1 << __REQ_QUIET)
#define REQ_PREEMPT	(1 << __REQ_PREEMPT)
#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
#define REQ_RW_SYNC	(1 << __REQ_RW_SYNC)
#define REQ_ALLOCED	(1 << __REQ_ALLOCED)
#define REQ_RW_META	(1 << __REQ_RW_META)
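
/*
 * Illustrative sketch (not an existing helper; the use of ->retries here
 * is made up for the example): a low level driver might consult these
 * flags when deciding whether to retry a failed request:
 *
 *	if (error && !(rq->cmd_flags & REQ_FAILFAST) && rq->retries-- > 0)
 *		blk_requeue_request(rq->q, rq);
 */
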
#define BLK_MAX_CDB	16

/*
 * try to put the fields that are referenced together in the same cacheline
 */
struct request {
	struct list_head queuelist;
	struct list_head donelist;

	struct request_queue *q;

	unsigned int cmd_flags;
	enum rq_cmd_type_bits cmd_type;

	/* Maintain bio traversal state for part by part I/O submission.
	 * hard_* are block layer internals, no driver should touch them!
	 */

	sector_t sector;		/* next sector to submit */
	sector_t hard_sector;		/* next sector to complete */
	unsigned long nr_sectors;	/* no. of sectors left to submit */
	unsigned long hard_nr_sectors;	/* no. of sectors left to complete */
	/* no. of sectors left to submit in the current segment */
	unsigned int current_nr_sectors;

	/* no. of sectors left to complete in the current segment */
	unsigned int hard_cur_sectors;

	struct bio *bio;
	struct bio *biotail;

	struct hlist_node hash;	/* merge hash */
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * two pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.
	 */
	void *elevator_private;
	void *elevator_private2;

	struct gendisk *rq_disk;
	unsigned long start_time;

	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

	/* Number of scatter-gather addr+len pairs after
	 * physical and DMA remapping hardware coalescing is performed.
	 * This is the number of scatter-gather entries the driver
	 * will actually have to deal with after DMA mapping is done.
	 */
	unsigned short nr_hw_segments;

	unsigned short ioprio;

	void *special;
	char *buffer;

	int tag;
	int errors;

	int ref_count;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned int cmd_len;
	unsigned char cmd[BLK_MAX_CDB];

	unsigned int data_len;
	unsigned int sense_len;
	void *data;
	void *sense;

	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests. Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int	pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32	pm_state;
	void*	data;		/* for driver use */
};

#include <linux/elevator.h>

typedef void (request_fn_proc) (struct request_queue *q);
typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unplug_fn) (struct request_queue *);

struct bio_vec;
typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
typedef void (softirq_done_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

struct request_queue
{
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	elevator_t		*elevator;

	/*
	 * the queue request freelist, one for reads and one for writes
	 */
	struct request_list	rq;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unplug_fn		*unplug_fn;
	merge_bvec_fn		*merge_bvec_fn;
	prepare_flush_fn	*prepare_flush_fn;
	softirq_done_fn		*softirq_done_fn;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Auto-unplugging state
	 */
	struct timer_list	unplug_timer;
	int			unplug_thresh;	/* After this many requests */
	unsigned long		unplug_delay;	/* After this many jiffies */
	struct work_struct	unplug_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	unsigned long		bounce_pfn;
	gfp_t			bounce_gfp;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * Protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly; it is queue private. Always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		max_sectors;
	unsigned int		max_hw_sectors;
	unsigned short		max_phys_segments;
	unsigned short		max_hw_segments;
	unsigned short		hardsect_size;
	unsigned int		max_segment_size;

	unsigned long		seg_boundary_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * reserved for flush operations
	 */
	unsigned int		ordered, next_ordered, ordseq;
	int			orderr, ordcolor;
	struct request		pre_flush_rq, bar_rq, post_flush_rq;
	struct request		*orig_bar_rq;

	struct mutex		sysfs_lock;

#if defined(CONFIG_BLK_DEV_BSG)
	struct bsg_class_device bsg_dev;
#endif
};

#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define	QUEUE_FLAG_READFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_WRITEFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
#define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
#define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
#define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */

enum {
	/*
	 * Hardbarrier is supported with one of the following methods.
	 *
	 * NONE		: hardbarrier unsupported
	 * DRAIN	: ordering by draining is enough
	 * DRAIN_FLUSH	: ordering by draining w/ pre and post flushes
	 * DRAIN_FUA	: ordering by draining w/ pre flush and FUA write
	 * TAG		: ordering by tag is enough
	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
	 */
	QUEUE_ORDERED_NONE	= 0x00,
	QUEUE_ORDERED_DRAIN	= 0x01,
	QUEUE_ORDERED_TAG	= 0x02,

	QUEUE_ORDERED_PREFLUSH	= 0x10,
	QUEUE_ORDERED_POSTFLUSH	= 0x20,
	QUEUE_ORDERED_FUA	= 0x40,

	QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
	QUEUE_ORDERED_DRAIN_FUA	= QUEUE_ORDERED_DRAIN |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
	QUEUE_ORDERED_TAG_FLUSH	= QUEUE_ORDERED_TAG |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
	QUEUE_ORDERED_TAG_FUA	= QUEUE_ORDERED_TAG |
			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,

	/*
	 * Ordered operation sequence
	 */
	QUEUE_ORDSEQ_STARTED	= 0x01,	/* flushing in progress */
	QUEUE_ORDSEQ_DRAIN	= 0x02,	/* waiting for the queue to be drained */
	QUEUE_ORDSEQ_PREFLUSH	= 0x04,	/* pre-flushing in progress */
	QUEUE_ORDSEQ_BAR	= 0x08,	/* original barrier req in progress */
	QUEUE_ORDSEQ_POSTFLUSH	= 0x10,	/* post-flushing in progress */
	QUEUE_ORDSEQ_DONE	= 0x20,
};
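
/*
 * Illustrative sketch: a driver for a device with a volatile write cache
 * typically advertises barrier support at init time. The exdrv_* names
 * are made up; the shape follows existing drivers:
 *
 *	static void exdrv_prepare_flush(struct request_queue *q,
 *					struct request *rq)
 *	{
 *		rq->cmd_type = REQ_TYPE_FLUSH;
 *	}
 *
 *	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, exdrv_prepare_flush);
 *
 * The request_fn then recognizes REQ_TYPE_FLUSH requests and issues the
 * device's cache flush command for them.
 */
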
#define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_flushing(q)	((q)->ordseq)

#define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
#define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
#define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
#define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)

#define blk_noretry_request(rq)	((rq)->cmd_flags & REQ_FAILFAST)
#define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)

#define blk_account_rq(rq)	(blk_rq_started(rq) && blk_fs_request(rq))

#define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
#define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)
#define blk_pm_request(rq)	\
	(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))

#define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
#define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
#define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
#define blk_empty_barrier(rq)	(blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((rq)->cmd_flags & 1)

/*
 * We regard a request as sync if it's a READ or a SYNC write.
 */
#define rq_is_sync(rq)		(rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
#define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)

static inline int blk_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		set_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	else
		set_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}

static inline void blk_clear_queue_full(struct request_queue *q, int rw)
{
	if (rw == READ)
		clear_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
	else
		clear_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
}


/*
 * A mergeable request must not have the _NOMERGE or _BARRIER bit set, nor
 * may it already have been started by the driver.
 */
#define RQ_NOMERGE_FLAGS	\
	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
#define rq_mergeable(rq)	\
	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ANY		((u64)blk_max_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ISA		(ISA_DMA_THRESHOLD)

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct req_iterator {
	int i;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.i)

#define rq_iter_last(rq, _iter)					\
		(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
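
/*
 * Illustrative sketch: walking every segment of a request, e.g. for PIO
 * transfers (exdrv_copy_segment is made up for the example):
 *
 *	struct req_iterator iter;
 *	struct bio_vec *bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter)
 *		exdrv_copy_segment(page_address(bvec->bv_page) +
 *				   bvec->bv_offset, bvec->bv_len);
 */
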
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern void blk_end_sync_rq(struct request *rq, int error);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_plug_device(struct request_queue *);
extern int blk_remove_plug(struct request_queue *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_cmd_ioctl(struct file *, struct request_queue *,
			  struct gendisk *, unsigned int, void __user *);
extern int sg_scsi_ioctl(struct file *, struct request_queue *,
		struct gendisk *, struct scsi_ioctl_command __user *);

/*
 * Temporary export, until SCSI gets fixed up.
 */
extern int blk_rq_append_bio(struct request_queue *q, struct request *rq,
			     struct bio *bio);

/*
 * A queue has just exited congestion.  Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(struct request_queue *q, int rw)
{
	clear_bdi_congested(&q->backing_dev_info, rw);
}

/*
 * A queue has just entered congestion.  Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(struct request_queue *q, int rw)
{
	set_bdi_congested(&q->backing_dev_info, rw);
}

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_start_queueing(struct request_queue *);
extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct sg_iovec *, int, unsigned int);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
extern int blk_verify_command(unsigned char *, int);
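
/*
 * Illustrative sketch: issuing a packet command synchronously through the
 * block layer. The opcode and lengths are made up and error handling is
 * elided; q, disk, buf and buf_len are placeholders:
 *
 *	struct request *rq;
 *
 *	rq = blk_get_request(q, READ, GFP_KERNEL);
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	rq->cmd[0] = EXDRV_INQUIRY_OPCODE;
 *	rq->cmd_len = 6;
 *	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
 *	if (!blk_rq_map_kern(q, rq, buf, buf_len, GFP_KERNEL))
 *		err = blk_execute_rq(q, disk, rq, 0);
 *	blk_put_request(rq);
 */
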
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
				       struct page *page)
{
	if (bdi && bdi->unplug_io_fn)
		bdi->unplug_io_fn(bdi, page);
}

static inline void blk_run_address_space(struct address_space *mapping)
{
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, NULL);
}

/*
 * end_request() and friends. Must be called with the request queue spinlock
 * acquired. All functions called within end_request() _must_be_ atomic.
 *
 * Several drivers define their own end_request and call
 * end_that_request_first() and end_that_request_last()
 * for parts of the original function. This prevents
 * code duplication in drivers.
 */
extern int end_that_request_first(struct request *, int, int);
extern int end_that_request_chunk(struct request *, int, int);
extern void end_that_request_last(struct request *, int);
extern void end_request(struct request *, int);
extern void end_queued_request(struct request *, int);
extern void end_dequeued_request(struct request *, int);
extern void blk_complete_request(struct request *);

/*
 * end_that_request_first/chunk() takes an uptodate argument. We account
 * any value <= 0 as an I/O error. 0 means -EIO for compatibility reasons,
 * any other value < 0 is the direct error type. An uptodate value of
 * 1 indicates successful I/O completion.
 */
#define end_io_error(uptodate)	(unlikely((uptodate) <= 0))

static inline void blkdev_dequeue_request(struct request *req)
{
	elv_dequeue_request(req->q, req);
}
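
/*
 * Illustrative sketch: the classic completion pattern in a driver, once a
 * transfer of nsect sectors has finished (uptodate is 1 on success,
 * <= 0 on error; the queue lock must be held, and the request must still
 * be on the queue). This is essentially what end_request() does for the
 * simple single-chunk case:
 *
 *	if (!end_that_request_first(rq, uptodate, nsect)) {
 *		blkdev_dequeue_request(rq);
 *		end_that_request_last(rq, uptodate);
 *	}
 */
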
/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
extern int blk_do_ordered(struct request_queue *, struct request **);
extern unsigned blk_ordered_cur_seq(struct request_queue *);
extern unsigned blk_ordered_req_seq(struct request *);
extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);
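
/*
 * Illustrative sketch: typical queue setup in a driver's probe routine.
 * The exdrv_* names and the particular limits are made up for the example:
 *
 *	q = blk_init_queue(exdrv_request, &exdrv_lock);
 *	if (!q)
 *		return -ENOMEM;
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 *	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
 *	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
 *	blk_queue_hardsect_size(q, 512);
 */
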
extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(struct request_queue *);
extern void __generic_unplug_device(struct request_queue *);
extern long nr_blockdev_pages(void);

int blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);

/*
 * tag stuff
 */
#define blk_queue_tag_depth(q)		((q)->queue_tags->busy)
#define blk_queue_tag_queue(q)		((q)->queue_tags->busy < (q)->queue_tags->max_depth)
#define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);
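
/*
 * Illustrative sketch of block layer tagging in a driver;
 * EXDRV_QUEUE_DEPTH is made up for the example. At init time:
 *
 *	if (blk_queue_init_tags(q, EXDRV_QUEUE_DEPTH, NULL))
 *		goto fail;
 *
 * In the request_fn, before handing a request to the hardware (a nonzero
 * return means no tag was free, so try again later):
 *
 *	if (blk_queue_start_tag(q, rq))
 *		break;
 *
 * and on completion, with the queue lock held:
 *
 *	blk_queue_end_tag(q, rq);
 */
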
static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}

extern int blkdev_issue_flush(struct block_device *, sector_t *);

#define MAX_PHYS_SEGMENTS 128
#define MAX_HW_SEGMENTS 128
#define SAFE_MAX_SECTORS 255
#define BLK_DEF_MAX_SECTORS 1024

#define MAX_SEGMENT_SIZE	65536

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline int queue_hardsect_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->hardsect_size)
		retval = q->hardsect_size;

	return retval;
}

static inline int bdev_hardsect_size(struct block_device *bdev)
{
	return queue_hardsect_size(bdev_get_queue(bdev));
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	int retval = 511;

	if (q && q->dma_alignment)
		retval = q->dma_alignment;

	return retval;
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}
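
/*
 * For example, blksize_bits(512) == 9, blksize_bits(1024) == 10 and
 * blksize_bits(4096) == 12: the loop shifts size right until it reaches
 * 256, counting one bit per shift on top of the initial 8 (2^8 == 256).
 */
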
static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
void kblockd_flush_work(struct work_struct *work);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")


#else /* CONFIG_BLOCK */
/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

static inline void exit_io_context(void)
{
}

#endif /* CONFIG_BLOCK */

#endif