#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#ifdef CONFIG_BLOCK

#include <linux/sched.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>

#include <asm/scatterlist.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

struct request_list {
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int count[2];
	int starved[2];
	int elvpriv;
	mempool_t *rq_pool;
	wait_queue_head_t wait[2];
};
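/*
 * Illustrative sketch, not part of this header: because the arrays above
 * are indexed by BLK_RW_SYNC/BLK_RW_ASYNC (from <linux/backing-dev.h>),
 * allocation bookkeeping in a hypothetical caller picks an index once and
 * then uses it consistently:
 *
 *	int is_sync = rw_is_sync(rw_flags) ? BLK_RW_SYNC : BLK_RW_ASYNC;
 *	q->rq.count[is_sync]++;
 *	if (waitqueue_active(&q->rq.wait[is_sync]))
 *		wake_up(&q->rq.wait[is_sync]);
 */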

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_SENSE,			/* sense request */
	REQ_TYPE_PM_SUSPEND,		/* suspend request */
	REQ_TYPE_PM_RESUME,		/* resume request */
	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
	REQ_TYPE_SPECIAL,		/* driver defined type */
	/*
	 * For ATA/ATAPI devices. This really doesn't belong here; ide should
	 * use REQ_TYPE_SPECIAL and rq->cmd[0] with the range of driver-private
	 * REQ_LB opcodes to differentiate what type of request this is.
	 */
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};

#define BLK_MAX_CDB	16

/*
 * Try to put the fields that are referenced together in the same cacheline.
 * If you modify this structure, be sure to check block/blk-core.c:blk_rq_init()
 * as well!
 */
struct request {
	struct list_head queuelist;
	struct call_single_data csd;

	struct request_queue *q;

	unsigned int cmd_flags;
	enum rq_cmd_type_bits cmd_type;
	unsigned long atomic_flags;

	int cpu;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	struct hlist_node hash;	/* merge hash */
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers; if they need
	 * more they have to allocate it dynamically.  Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq		*icq;
			void			*priv[2];
		} elv;

		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;    /* when passed to hardware */
#endif
	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	int ref_count;

	void *special;		/* opaque pointer available for LLD use */
	char *buffer;		/* kaddr of the current segment if available */

	int tag;
	int errors;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;
	unsigned short cmd_len;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests. Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int	pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32	pm_state;
	void*	data;		/* for driver use */
};

#include <linux/elevator.h>

typedef void (request_fn_proc) (struct request_queue *q);
typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
struct bvec_merge_data {
	struct block_device *bi_bdev;
	sector_t bi_sector;
	unsigned bi_size;
	unsigned long bi_rw;
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
			     struct bio_vec *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;

	unsigned short		logical_block_size;
	unsigned short		max_segments;
	unsigned short		max_integrity_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		cluster;
	unsigned char		discard_zeroes_data;
};
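/*
 * Illustrative sketch, not part of this header: drivers should not poke
 * queue_limits fields directly but go through the blk_queue_*() accessors
 * declared further down, e.g. in a hypothetical probe routine:
 *
 *	blk_queue_max_hw_sectors(q, 1024);	-- at most 512 KiB per request
 *	blk_queue_max_segments(q, 128);
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 */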

struct request_queue {
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	struct elevator_queue	*elevator;

	/*
	 * the queue request freelist, one for sync and one for async requests
	 */
	struct request_list	rq;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unprep_rq_fn		*unprep_rq_fn;
	merge_bvec_fn		*merge_bvec_fn;
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Delayed queue handling
	 */
	struct delayed_work	delay_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	/*
	 * Protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly; it is queue private. Always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		dma_drain_size;
	void			*dma_drain_buffer;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight[2];

	unsigned int		rq_timeout;
	struct timer_list	timeout;
	struct list_head	timeout_list;

	struct list_head	icq_list;

	struct queue_limits	limits;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	unsigned int		flush_flags;
	unsigned int		flush_not_queueable:1;
	unsigned int		flush_queue_delayed:1;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		flush_rq;

	struct mutex		sysfs_lock;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn		*bsg_job_fn;
	int			bsg_job_size;
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
};

#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define	QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
#define QUEUE_FLAG_ELVSWITCH	6	/* don't use elevator, just do FIFO */
#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES     8	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO     10	/* fake timeout */
#define QUEUE_FLAG_STACKABLE   11	/* supports request stacking */
#define QUEUE_FLAG_NONROT      12	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT     13	/* do IO stats */
#define QUEUE_FLAG_DISCARD     14	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES   15	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM  16	/* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD  17	/* supports SECDISCARD */
#define QUEUE_FLAG_SAME_FORCE  18	/* force complete on same CPU */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

static inline int queue_is_locked(struct request_queue *q)
{
#ifdef CONFIG_SMP
	spinlock_t *lock = q->queue_lock;
	return lock && spin_is_locked(lock);
#else
	return 1;
#endif
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}
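/*
 * Illustrative sketch, not part of this header: the locked variants above
 * expect ->queue_lock to be held, e.g. in a hypothetical caller
 * (do_stop_work() is made up):
 *
 *	spin_lock_irq(q->queue_lock);
 *	if (!queue_flag_test_and_set(QUEUE_FLAG_STOPPED, q))
 *		do_stop_work(q);	-- flag was clear; we set it
 *	spin_unlock_irq(q->queue_lock);
 */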

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
	__clear_bit(flag, &q->queue_flags);
}

#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secdiscard(q)	(blk_queue_discard(q) && \
	test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
	(((rq)->cmd_flags & REQ_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS || \
	  ((rq)->cmd_flags & REQ_DISCARD)))

#define blk_pm_request(rq)	\
	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
	 (rq)->cmd_type == REQ_TYPE_PM_RESUME)

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((rq)->cmd_flags & 1)

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

/*
 * We regard a request as sync if it is either a read or a sync write.
 */
static inline bool rw_is_sync(unsigned int rw_flags)
{
	return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(rq->cmd_flags);
}
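/*
 * Illustrative examples (not normative): with the definition above,
 *
 *	rw_is_sync(0)				-- read: sync
 *	rw_is_sync(REQ_WRITE)			-- plain write: async
 *	rw_is_sync(REQ_WRITE | REQ_SYNC)	-- sync write: sync
 */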

static inline int blk_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
	return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
	else
		queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
}

static inline void blk_clear_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
	else
		queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
}


/*
 * A mergeable request must not have the _NOMERGE or _BARRIER bit set,
 * nor may it already have been started by the driver.
 */
#define RQ_NOMERGE_FLAGS	\
	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
#define rq_mergeable(rq)	\
	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
	 (((rq)->cmd_flags & REQ_DISCARD) || \
	  (rq)->cmd_type == REQ_TYPE_FS))

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */
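/*
 * Illustrative sketch, not part of this header: a hypothetical driver's
 * prep_rq_fn would map onto these values roughly as follows (the my_*()
 * helpers are made up):
 *
 *	static int my_prep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		if (!my_resources_ready(q))
 *			return BLKPREP_DEFER;	-- retried later
 *		if (my_build_cdb(rq) < 0)
 *			return BLKPREP_KILL;	-- request is failed
 *		return BLKPREP_OK;		-- hand to ->request_fn
 *	}
 */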

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	int i;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.i)

#define rq_iter_last(rq, _iter)					\
		(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
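/*
 * Illustrative sketch, not part of this header: iterating over all data
 * segments of a request in a hypothetical driver (my_handle_segment() is
 * made up):
 *
 *	struct req_iterator iter;
 *	struct bio_vec *bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter)
 *		my_handle_segment(bvec->bv_page, bvec->bv_offset,
 *				  bvec->bv_len);
 */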

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern struct request *blk_make_request(struct request_queue *, struct bio *,
					gfp_t);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
		unsigned int len);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

extern void blk_queue_bio(struct request_queue *q, struct bio *bio);

/*
 * A queue has just exited congestion.  Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
{
	clear_bdi_congested(&q->backing_dev_info, sync);
}

/*
 * A queue has just entered congestion.  Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(struct request_queue *q, int sync)
{
	set_bdi_congested(&q->backing_dev_info, sync);
}

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, struct sg_iovec *, int,
			       unsigned int, gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
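/*
 * Illustrative sketch, not part of this header: issuing a synchronous
 * request on a kernel buffer from a hypothetical caller:
 *
 *	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
 *	if (!rq)
 *		return -ENOMEM;
 *	err = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
 *	if (!err)
 *		err = blk_execute_rq(q, disk, rq, 0);	-- 0: not at head
 *	blk_put_request(rq);
 */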

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}
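/*
 * Illustrative note: these accessors are the only sanctioned way to read
 * the __sector/__data_len fields flagged "NEVER access directly" in
 * struct request, and they stay consistent with each other, e.g.:
 *
 *	blk_rq_sectors(rq) == blk_rq_bytes(rq) >> 9
 *	blk_rq_cur_bytes(rq) <= blk_rq_bytes(rq)
 */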

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);
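/*
 * Illustrative sketch, not part of this header: the classic ->request_fn
 * dispatch loop of a hypothetical driver, entered with ->queue_lock held
 * (my_transfer() is made up):
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			if (rq->cmd_type != REQ_TYPE_FS) {
 *				__blk_end_request_all(rq, -EIO);
 *				continue;
 *			}
 *			my_transfer(rq);	-- completes rq later
 *		}
 *	}
 */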

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends.  __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 811 |  | 
| Jens Axboe | ff856ba | 2006-01-09 16:02:34 +0100 | [diff] [blame] | 812 | extern void blk_complete_request(struct request *); | 
| Jens Axboe | 242f9dc | 2008-09-14 05:55:09 -0700 | [diff] [blame] | 813 | extern void __blk_complete_request(struct request *); | 
 | 814 | extern void blk_abort_request(struct request *); | 
| Mike Anderson | 11914a5 | 2008-09-13 20:31:27 +0200 | [diff] [blame] | 815 | extern void blk_abort_queue(struct request_queue *); | 
| James Bottomley | 28018c2 | 2010-07-01 19:49:17 +0900 | [diff] [blame] | 816 | extern void blk_unprep_request(struct request *); | 
| Jens Axboe | ff856ba | 2006-01-09 16:02:34 +0100 | [diff] [blame] | 817 |  | 
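/*
 * Editor's sketch: a minimal request_fn for a simple synchronous driver.
 * sketch_transfer() is a hypothetical helper returning 0 or a negative
 * errno.  request_fn is invoked with the queue lock held, hence the
 * locked __blk_end_request_all() variant.
 */
static void sketch_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		int error = sketch_transfer(rq);	/* hypothetical */

		__blk_end_request_all(rq, error);
	}
}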
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 818 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 819 |  * Access functions for manipulating queue properties | 
 | 820 |  */ | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 821 | extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn, | 
| Christoph Lameter | 1946089 | 2005-06-23 00:08:19 -0700 | [diff] [blame] | 822 | 					spinlock_t *lock, int node_id); | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 823 | extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *); | 
| Mike Snitzer | 01effb0 | 2010-05-11 08:57:42 +0200 | [diff] [blame] | 824 | extern struct request_queue *blk_init_allocated_queue(struct request_queue *, | 
 | 825 | 						      request_fn_proc *, spinlock_t *); | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 826 | extern void blk_cleanup_queue(struct request_queue *); | 
 | 827 | extern void blk_queue_make_request(struct request_queue *, make_request_fn *); | 
 | 828 | extern void blk_queue_bounce_limit(struct request_queue *, u64); | 
| Mike Snitzer | 72d4cd9 | 2010-12-17 08:34:20 +0100 | [diff] [blame] | 829 | extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int); | 
| Martin K. Petersen | 086fa5f | 2010-02-26 00:20:38 -0500 | [diff] [blame] | 830 | extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); | 
| Martin K. Petersen | 8a78362 | 2010-02-26 00:20:39 -0500 | [diff] [blame] | 831 | extern void blk_queue_max_segments(struct request_queue *, unsigned short); | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 832 | extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); | 
| Christoph Hellwig | 67efc92 | 2009-09-30 13:54:20 +0200 | [diff] [blame] | 833 | extern void blk_queue_max_discard_sectors(struct request_queue *q, | 
 | 834 | 		unsigned int max_discard_sectors); | 
| Martin K. Petersen | e1defc4 | 2009-05-22 17:17:49 -0400 | [diff] [blame] | 835 | extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); | 
| Martin K. Petersen | 892b6f9 | 2010-10-13 21:18:03 +0200 | [diff] [blame] | 836 | extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 837 | extern void blk_queue_alignment_offset(struct request_queue *q, | 
 | 838 | 				       unsigned int alignment); | 
| Martin K. Petersen | 7c958e3 | 2009-07-31 11:49:11 -0400 | [diff] [blame] | 839 | extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 840 | extern void blk_queue_io_min(struct request_queue *q, unsigned int min); | 
| Martin K. Petersen | 3c5820c | 2009-09-11 21:54:52 +0200 | [diff] [blame] | 841 | extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 842 | extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); | 
| Martin K. Petersen | e475bba | 2009-06-16 08:23:52 +0200 | [diff] [blame] | 843 | extern void blk_set_default_limits(struct queue_limits *lim); | 
| Martin K. Petersen | b1bd055 | 2012-01-11 16:27:11 +0100 | [diff] [blame] | 844 | extern void blk_set_stacking_limits(struct queue_limits *lim); | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 845 | extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, | 
 | 846 | 			    sector_t offset); | 
| Martin K. Petersen | 17be8c2 | 2010-01-11 03:21:49 -0500 | [diff] [blame] | 847 | extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, | 
 | 848 | 			    sector_t offset); | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 849 | extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, | 
 | 850 | 			      sector_t offset); | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 851 | extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); | 
| Tejun Heo | e3790c7 | 2008-03-04 11:18:17 +0100 | [diff] [blame] | 852 | extern void blk_queue_dma_pad(struct request_queue *, unsigned int); | 
| FUJITA Tomonori | 27f8221 | 2008-07-04 09:30:03 +0200 | [diff] [blame] | 853 | extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); | 
| Tejun Heo | 2fb98e8 | 2008-02-19 11:36:53 +0100 | [diff] [blame] | 854 | extern int blk_queue_dma_drain(struct request_queue *q, | 
 | 855 | 			       dma_drain_needed_fn *dma_drain_needed, | 
 | 856 | 			       void *buf, unsigned int size); | 
| Kiyoshi Ueda | ef9e3fa | 2008-10-01 16:12:15 +0200 | [diff] [blame] | 857 | extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn); | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 858 | extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); | 
 | 859 | extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); | 
| James Bottomley | 28018c2 | 2010-07-01 19:49:17 +0900 | [diff] [blame] | 860 | extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn); | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 861 | extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *); | 
 | 862 | extern void blk_queue_dma_alignment(struct request_queue *, int); | 
| James Bottomley | 11c3e68 | 2007-12-31 16:37:00 -0600 | [diff] [blame] | 863 | extern void blk_queue_update_dma_alignment(struct request_queue *, int); | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 864 | extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); | 
| Jens Axboe | 242f9dc | 2008-09-14 05:55:09 -0700 | [diff] [blame] | 865 | extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); | 
 | 866 | extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); | 
| Tejun Heo | 4913efe | 2010-09-03 11:56:16 +0200 | [diff] [blame] | 867 | extern void blk_queue_flush(struct request_queue *q, unsigned int flush); | 
| shaohua.li@intel.com | f387693 | 2011-05-06 11:34:32 -0600 | [diff] [blame] | 868 | extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 869 | extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 870 |  | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 871 | extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 872 | extern void blk_dump_rq_flags(struct request *, char *); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 873 | extern long nr_blockdev_pages(void); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 874 |  | 
| Tejun Heo | 09ac46c | 2011-12-14 00:33:38 +0100 | [diff] [blame] | 875 | bool __must_check blk_get_queue(struct request_queue *); | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 876 | struct request_queue *blk_alloc_queue(gfp_t); | 
 | 877 | struct request_queue *blk_alloc_queue_node(gfp_t, int); | 
 | 878 | extern void blk_put_queue(struct request_queue *); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 879 |  | 
| Shaohua Li | 316cc67 | 2011-07-08 08:19:21 +0200 | [diff] [blame] | 880 | /* | 
| Suresh Jayaraman | 75df713 | 2011-09-21 10:00:16 +0200 | [diff] [blame] | 881 |  * blk_plug permits building a queue of related requests by holding the I/O | 
 | 882 |  * fragments for a short period. This allows merging of sequential requests | 
 | 883 |  * into a single larger request. As the requests are moved from a per-task | 
 | 884 |  * list to the device's request_queue in a batch, scalability improves because | 
 | 885 |  * contention on the request_queue lock is reduced. | 
 | 886 |  * | 
 | 887 |  * It is OK not to disable preemption when adding the request to the plug list | 
 | 888 |  * or when attempting a merge, because blk_schedule_flush_plug() will only | 
 | 889 |  * flush the plug list when the task itself sleeps. For details, see | 
 | 890 |  * schedule(), where blk_schedule_flush_plug() is called. | 
| Shaohua Li | 316cc67 | 2011-07-08 08:19:21 +0200 | [diff] [blame] | 891 |  */ | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 892 | struct blk_plug { | 
| Suresh Jayaraman | 75df713 | 2011-09-21 10:00:16 +0200 | [diff] [blame] | 893 | 	unsigned long magic; /* detect uninitialized use */ | 
 | 894 | 	struct list_head list; /* requests */ | 
 | 895 | 	struct list_head cb_list; /* md requires an unplug callback */ | 
 | 896 | 	unsigned int should_sort; /* list to be sorted before flushing? */ | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 897 | }; | 
| Shaohua Li | 55c022b | 2011-07-08 08:19:20 +0200 | [diff] [blame] | 898 | #define BLK_MAX_REQUEST_COUNT 16 | 
 | 899 |  | 
| NeilBrown | 048c937 | 2011-04-18 09:52:22 +0200 | [diff] [blame] | 900 | struct blk_plug_cb { | 
 | 901 | 	struct list_head list; | 
 | 902 | 	void (*callback)(struct blk_plug_cb *); | 
 | 903 | }; | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 904 |  | 
 | 905 | extern void blk_start_plug(struct blk_plug *); | 
 | 906 | extern void blk_finish_plug(struct blk_plug *); | 
| Jens Axboe | f660378 | 2011-04-15 15:49:07 +0200 | [diff] [blame] | 907 | extern void blk_flush_plug_list(struct blk_plug *, bool); | 
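/*
 * Editor's sketch: typical use of plugging around a batch of
 * submissions; submit_one_bio() is a hypothetical helper.  Requests
 * collect on the per-task plug list and are pushed to the device's
 * queue in one go by blk_finish_plug().
 */
static void sketch_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_one_bio(bios[i]);
	blk_finish_plug(&plug);
}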
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 908 |  | 
 | 909 | static inline void blk_flush_plug(struct task_struct *tsk) | 
 | 910 | { | 
 | 911 | 	struct blk_plug *plug = tsk->plug; | 
 | 912 |  | 
| Christoph Hellwig | 88b996c | 2011-04-15 15:20:10 +0200 | [diff] [blame] | 913 | 	if (plug) | 
| Jens Axboe | a237c1c | 2011-04-16 13:27:55 +0200 | [diff] [blame] | 914 | 		blk_flush_plug_list(plug, false); | 
 | 915 | } | 
 | 916 |  | 
 | 917 | static inline void blk_schedule_flush_plug(struct task_struct *tsk) | 
 | 918 | { | 
 | 919 | 	struct blk_plug *plug = tsk->plug; | 
 | 920 |  | 
 | 921 | 	if (plug) | 
| Jens Axboe | f660378 | 2011-04-15 15:49:07 +0200 | [diff] [blame] | 922 | 		blk_flush_plug_list(plug, true); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 923 | } | 
 | 924 |  | 
 | 925 | static inline bool blk_needs_flush_plug(struct task_struct *tsk) | 
 | 926 | { | 
 | 927 | 	struct blk_plug *plug = tsk->plug; | 
 | 928 |  | 
| NeilBrown | 048c937 | 2011-04-18 09:52:22 +0200 | [diff] [blame] | 929 | 	return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list)); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 930 | } | 
 | 931 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 932 | /* | 
 | 933 |  * Tagged command queueing support | 
 | 934 |  */ | 
| Jens Axboe | 4aff5e2 | 2006-08-10 08:44:47 +0200 | [diff] [blame] | 935 | #define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED) | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 936 | extern int blk_queue_start_tag(struct request_queue *, struct request *); | 
 | 937 | extern struct request *blk_queue_find_tag(struct request_queue *, int); | 
 | 938 | extern void blk_queue_end_tag(struct request_queue *, struct request *); | 
 | 939 | extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *); | 
 | 940 | extern void blk_queue_free_tags(struct request_queue *); | 
 | 941 | extern int blk_queue_resize_tags(struct request_queue *, int); | 
 | 942 | extern void blk_queue_invalidate_tags(struct request_queue *); | 
| James Bottomley | 492dfb4 | 2006-08-30 15:48:45 -0400 | [diff] [blame] | 943 | extern struct blk_queue_tag *blk_init_tags(int); | 
 | 944 | extern void blk_free_tags(struct blk_queue_tag *); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 945 |  | 
| David C Somayajulu | f583f49 | 2006-10-04 08:27:25 +0200 | [diff] [blame] | 946 | static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, | 
 | 947 | 						int tag) | 
 | 948 | { | 
 | 949 | 	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth)) | 
 | 950 | 		return NULL; | 
 | 951 | 	return bqt->tag_index[tag]; | 
 | 952 | } | 
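/*
 * Editor's sketch: enabling tagged queuing with a block-layer-allocated
 * tag map (NULL third argument); returns 0 on success.  The depth of 64
 * is an arbitrary example value.
 */
static inline int sketch_enable_tags(struct request_queue *q)
{
	return blk_queue_init_tags(q, 64, NULL);
}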
| Christoph Hellwig | dd3932e | 2010-09-16 20:51:46 +0200 | [diff] [blame] | 953 |  | 
 | 954 | #define BLKDEV_DISCARD_SECURE  0x01    /* secure discard */ | 
 | 955 |  | 
 | 956 | extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *); | 
| Dmitry Monakhov | fbd9b09 | 2010-04-28 17:55:06 +0400 | [diff] [blame] | 957 | extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, | 
 | 958 | 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); | 
| Dmitry Monakhov | 3f14d79 | 2010-04-28 17:55:09 +0400 | [diff] [blame] | 959 | extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, | 
| Christoph Hellwig | dd3932e | 2010-09-16 20:51:46 +0200 | [diff] [blame] | 960 | 			sector_t nr_sects, gfp_t gfp_mask); | 
| Christoph Hellwig | 2cf6d26 | 2010-08-18 05:29:10 -0400 | [diff] [blame] | 961 | static inline int sb_issue_discard(struct super_block *sb, sector_t block, | 
 | 962 | 		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) | 
| David Woodhouse | fb2dce8 | 2008-08-05 18:01:53 +0100 | [diff] [blame] | 963 | { | 
| Christoph Hellwig | 2cf6d26 | 2010-08-18 05:29:10 -0400 | [diff] [blame] | 964 | 	return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9), | 
 | 965 | 				    nr_blocks << (sb->s_blocksize_bits - 9), | 
 | 966 | 				    gfp_mask, flags); | 
| David Woodhouse | fb2dce8 | 2008-08-05 18:01:53 +0100 | [diff] [blame] | 967 | } | 
| Lukas Czerner | e6fa0be | 2010-10-27 21:30:04 -0400 | [diff] [blame] | 968 | static inline int sb_issue_zeroout(struct super_block *sb, sector_t block, | 
| Theodore Ts'o | a107e5a | 2010-10-27 23:44:47 -0400 | [diff] [blame] | 969 | 		sector_t nr_blocks, gfp_t gfp_mask) | 
| Lukas Czerner | e6fa0be | 2010-10-27 21:30:04 -0400 | [diff] [blame] | 970 | { | 
 | 971 | 	return blkdev_issue_zeroout(sb->s_bdev, | 
 | 972 | 				    block << (sb->s_blocksize_bits - 9), | 
 | 973 | 				    nr_blocks << (sb->s_blocksize_bits - 9), | 
| Theodore Ts'o | a107e5a | 2010-10-27 23:44:47 -0400 | [diff] [blame] | 974 | 				    gfp_mask); | 
| Lukas Czerner | e6fa0be | 2010-10-27 21:30:04 -0400 | [diff] [blame] | 975 | } | 
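/*
 * Editor's sketch: issuing a plain discard for a sector range.  A
 * caller wanting a secure discard would pass BLKDEV_DISCARD_SECURE as
 * the flags argument instead of 0.
 */
static inline int sketch_discard_range(struct block_device *bdev,
				       sector_t sector, sector_t nr_sects)
{
	return blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL, 0);
}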
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 976 |  | 
| Jens Axboe | 018e044 | 2009-06-26 16:27:10 +0200 | [diff] [blame] | 977 | extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); | 
| Adel Gadllah | 0b07de8 | 2008-06-26 13:48:27 +0200 | [diff] [blame] | 978 |  | 
| Martin K. Petersen | eb28d31 | 2010-02-26 00:20:37 -0500 | [diff] [blame] | 979 | enum blk_default_limits { | 
 | 980 | 	BLK_MAX_SEGMENTS	= 128, | 
 | 981 | 	BLK_SAFE_MAX_SECTORS	= 255, | 
 | 982 | 	BLK_DEF_MAX_SECTORS	= 1024, | 
 | 983 | 	BLK_MAX_SEGMENT_SIZE	= 65536, | 
 | 984 | 	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL, | 
 | 985 | }; | 
| Milan Broz | 0e435ac | 2008-12-03 12:55:08 +0100 | [diff] [blame] | 986 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 987 | #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) | 
 | 988 |  | 
| Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 989 | static inline unsigned long queue_bounce_pfn(struct request_queue *q) | 
 | 990 | { | 
| Martin K. Petersen | 025146e | 2009-05-22 17:17:51 -0400 | [diff] [blame] | 991 | 	return q->limits.bounce_pfn; | 
| Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 992 | } | 
 | 993 |  | 
 | 994 | static inline unsigned long queue_segment_boundary(struct request_queue *q) | 
 | 995 | { | 
| Martin K. Petersen | 025146e | 2009-05-22 17:17:51 -0400 | [diff] [blame] | 996 | 	return q->limits.seg_boundary_mask; | 
| Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 997 | } | 
 | 998 |  | 
 | 999 | static inline unsigned int queue_max_sectors(struct request_queue *q) | 
 | 1000 | { | 
| Martin K. Petersen | 025146e | 2009-05-22 17:17:51 -0400 | [diff] [blame] | 1001 | 	return q->limits.max_sectors; | 
| Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 1002 | } | 
 | 1003 |  | 
 | 1004 | static inline unsigned int queue_max_hw_sectors(struct request_queue *q) | 
 | 1005 | { | 
| Martin K. Petersen | 025146e | 2009-05-22 17:17:51 -0400 | [diff] [blame] | 1006 | 	return q->limits.max_hw_sectors; | 
| Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 1007 | } | 
 | 1008 |  | 
| Martin K. Petersen | 8a78362 | 2010-02-26 00:20:39 -0500 | [diff] [blame] | 1009 | static inline unsigned short queue_max_segments(struct request_queue *q) | 
| Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 1010 | { | 
| Martin K. Petersen | 8a78362 | 2010-02-26 00:20:39 -0500 | [diff] [blame] | 1011 | 	return q->limits.max_segments; | 
| Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 1012 | } | 
 | 1013 |  | 
 | 1014 | static inline unsigned int queue_max_segment_size(struct request_queue *q) | 
 | 1015 | { | 
| Martin K. Petersen | 025146e | 2009-05-22 17:17:51 -0400 | [diff] [blame] | 1016 | 	return q->limits.max_segment_size; | 
| Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 1017 | } | 
 | 1018 |  | 
| Martin K. Petersen | e1defc4 | 2009-05-22 17:17:49 -0400 | [diff] [blame] | 1019 | static inline unsigned short queue_logical_block_size(struct request_queue *q) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1020 | { | 
 | 1021 | 	int retval = 512; | 
 | 1022 |  | 
| Martin K. Petersen | 025146e | 2009-05-22 17:17:51 -0400 | [diff] [blame] | 1023 | 	if (q && q->limits.logical_block_size) | 
 | 1024 | 		retval = q->limits.logical_block_size; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1025 |  | 
 | 1026 | 	return retval; | 
 | 1027 | } | 
 | 1028 |  | 
| Martin K. Petersen | e1defc4 | 2009-05-22 17:17:49 -0400 | [diff] [blame] | 1029 | static inline unsigned short bdev_logical_block_size(struct block_device *bdev) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1030 | { | 
| Martin K. Petersen | e1defc4 | 2009-05-22 17:17:49 -0400 | [diff] [blame] | 1031 | 	return queue_logical_block_size(bdev_get_queue(bdev)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1032 | } | 
 | 1033 |  | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 1034 | static inline unsigned int queue_physical_block_size(struct request_queue *q) | 
 | 1035 | { | 
 | 1036 | 	return q->limits.physical_block_size; | 
 | 1037 | } | 
 | 1038 |  | 
| Martin K. Petersen | 892b6f9 | 2010-10-13 21:18:03 +0200 | [diff] [blame] | 1039 | static inline unsigned int bdev_physical_block_size(struct block_device *bdev) | 
| Martin K. Petersen | ac481c2 | 2009-10-03 20:52:01 +0200 | [diff] [blame] | 1040 | { | 
 | 1041 | 	return queue_physical_block_size(bdev_get_queue(bdev)); | 
 | 1042 | } | 
 | 1043 |  | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 1044 | static inline unsigned int queue_io_min(struct request_queue *q) | 
 | 1045 | { | 
 | 1046 | 	return q->limits.io_min; | 
 | 1047 | } | 
 | 1048 |  | 
| Martin K. Petersen | ac481c2 | 2009-10-03 20:52:01 +0200 | [diff] [blame] | 1049 | static inline int bdev_io_min(struct block_device *bdev) | 
 | 1050 | { | 
 | 1051 | 	return queue_io_min(bdev_get_queue(bdev)); | 
 | 1052 | } | 
 | 1053 |  | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 1054 | static inline unsigned int queue_io_opt(struct request_queue *q) | 
 | 1055 | { | 
 | 1056 | 	return q->limits.io_opt; | 
 | 1057 | } | 
 | 1058 |  | 
| Martin K. Petersen | ac481c2 | 2009-10-03 20:52:01 +0200 | [diff] [blame] | 1059 | static inline int bdev_io_opt(struct block_device *bdev) | 
 | 1060 | { | 
 | 1061 | 	return queue_io_opt(bdev_get_queue(bdev)); | 
 | 1062 | } | 
 | 1063 |  | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 1064 | static inline int queue_alignment_offset(struct request_queue *q) | 
 | 1065 | { | 
| Martin K. Petersen | ac481c2 | 2009-10-03 20:52:01 +0200 | [diff] [blame] | 1066 | 	if (q->limits.misaligned) | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 1067 | 		return -1; | 
 | 1068 |  | 
| Martin K. Petersen | ac481c2 | 2009-10-03 20:52:01 +0200 | [diff] [blame] | 1069 | 	return q->limits.alignment_offset; | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 1070 | } | 
 | 1071 |  | 
| Martin K. Petersen | e03a72e | 2010-01-11 03:21:51 -0500 | [diff] [blame] | 1072 | static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector) | 
| Martin K. Petersen | 81744ee | 2009-12-29 08:35:35 +0100 | [diff] [blame] | 1073 | { | 
 | 1074 | 	unsigned int granularity = max(lim->physical_block_size, lim->io_min); | 
| Martin K. Petersen | e03a72e | 2010-01-11 03:21:51 -0500 | [diff] [blame] | 1075 | 	unsigned int alignment = (sector << 9) & (granularity - 1); | 
| Martin K. Petersen | 81744ee | 2009-12-29 08:35:35 +0100 | [diff] [blame] | 1076 |  | 
| Martin K. Petersen | e03a72e | 2010-01-11 03:21:51 -0500 | [diff] [blame] | 1077 | 	return (granularity + lim->alignment_offset - alignment) | 
 | 1078 | 		& (granularity - 1); | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 1079 | } | 
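/*
 * Editor's worked example for queue_limit_alignment_offset(): with
 * physical_block_size = io_min = 4096 and alignment_offset = 0,
 * sector 7 lies at byte 3584 within its 4096-byte granule, so
 * (4096 + 0 - 3584) & 4095 = 512 bytes of padding are needed to reach
 * the next aligned boundary (sector 8).
 */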
 | 1080 |  | 
| Martin K. Petersen | ac481c2 | 2009-10-03 20:52:01 +0200 | [diff] [blame] | 1081 | static inline int bdev_alignment_offset(struct block_device *bdev) | 
 | 1082 | { | 
 | 1083 | 	struct request_queue *q = bdev_get_queue(bdev); | 
 | 1084 |  | 
 | 1085 | 	if (q->limits.misaligned) | 
 | 1086 | 		return -1; | 
 | 1087 |  | 
 | 1088 | 	if (bdev != bdev->bd_contains) | 
 | 1089 | 		return bdev->bd_part->alignment_offset; | 
 | 1090 |  | 
 | 1091 | 	return q->limits.alignment_offset; | 
 | 1092 | } | 
 | 1093 |  | 
| Martin K. Petersen | 86b3728 | 2009-11-10 11:50:21 +0100 | [diff] [blame] | 1094 | static inline int queue_discard_alignment(struct request_queue *q) | 
 | 1095 | { | 
 | 1096 | 	if (q->limits.discard_misaligned) | 
 | 1097 | 		return -1; | 
 | 1098 |  | 
 | 1099 | 	return q->limits.discard_alignment; | 
 | 1100 | } | 
 | 1101 |  | 
| Martin K. Petersen | e03a72e | 2010-01-11 03:21:51 -0500 | [diff] [blame] | 1102 | static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector) | 
| Martin K. Petersen | 86b3728 | 2009-11-10 11:50:21 +0100 | [diff] [blame] | 1103 | { | 
| Martin K. Petersen | dd3d145 | 2010-01-11 03:21:48 -0500 | [diff] [blame] | 1104 | 	unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1); | 
 | 1105 |  | 
| Martin K. Petersen | a934a00 | 2011-05-18 10:37:35 +0200 | [diff] [blame] | 1106 | 	if (!lim->max_discard_sectors) | 
 | 1107 | 		return 0; | 
 | 1108 |  | 
| Martin K. Petersen | dd3d145 | 2010-01-11 03:21:48 -0500 | [diff] [blame] | 1109 | 	return (lim->discard_granularity + lim->discard_alignment - alignment) | 
 | 1110 | 		& (lim->discard_granularity - 1); | 
| Martin K. Petersen | 86b3728 | 2009-11-10 11:50:21 +0100 | [diff] [blame] | 1111 | } | 
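/*
 * Editor's worked example for queue_limit_discard_alignment(): with
 * discard_granularity = 4096 and discard_alignment = 0, sector 1 lies
 * 512 bytes into its granule, so (4096 + 0 - 512) & 4095 = 3584 bytes
 * remain until the next discard boundary.
 */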
 | 1112 |  | 
| Martin K. Petersen | 98262f2 | 2009-12-03 09:24:48 +0100 | [diff] [blame] | 1113 | static inline unsigned int queue_discard_zeroes_data(struct request_queue *q) | 
 | 1114 | { | 
| Martin K. Petersen | a934a00 | 2011-05-18 10:37:35 +0200 | [diff] [blame] | 1115 | 	if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1) | 
| Martin K. Petersen | 98262f2 | 2009-12-03 09:24:48 +0100 | [diff] [blame] | 1116 | 		return 1; | 
 | 1117 |  | 
 | 1118 | 	return 0; | 
 | 1119 | } | 
 | 1120 |  | 
 | 1121 | static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev) | 
 | 1122 | { | 
 | 1123 | 	return queue_discard_zeroes_data(bdev_get_queue(bdev)); | 
 | 1124 | } | 
 | 1125 |  | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1126 | static inline int queue_dma_alignment(struct request_queue *q) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1127 | { | 
| Pete Wyckoff | 482eb68 | 2008-01-01 10:23:02 -0500 | [diff] [blame] | 1128 | 	return q ? q->dma_alignment : 511; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1129 | } | 
 | 1130 |  | 
| Namhyung Kim | 1441779 | 2010-09-15 13:08:27 +0200 | [diff] [blame] | 1131 | static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, | 
| FUJITA Tomonori | 8790407 | 2008-08-28 15:05:58 +0900 | [diff] [blame] | 1132 | 				 unsigned int len) | 
 | 1133 | { | 
 | 1134 | 	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; | 
| Namhyung Kim | 1441779 | 2010-09-15 13:08:27 +0200 | [diff] [blame] | 1135 | 	return !(addr & alignment) && !(len & alignment); | 
| FUJITA Tomonori | 8790407 | 2008-08-28 15:05:58 +0900 | [diff] [blame] | 1136 | } | 
 | 1137 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1138 | /* assumes size > 256 */ | 
 | 1139 | static inline unsigned int blksize_bits(unsigned int size) | 
 | 1140 | { | 
 | 1141 | 	unsigned int bits = 8; | 
 | 1142 | 	do { | 
 | 1143 | 		bits++; | 
 | 1144 | 		size >>= 1; | 
 | 1145 | 	} while (size > 256); | 
 | 1146 | 	return bits; | 
 | 1147 | } | 
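/*
 * Editor's note: block sizes are expected to be powers of two
 * (typically 512..PAGE_SIZE), so e.g. blksize_bits(512) == 9 and
 * blksize_bits(4096) == 12.
 */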
 | 1148 |  | 
| Adrian Bunk | 2befb9e | 2005-09-10 00:27:17 -0700 | [diff] [blame] | 1149 | static inline unsigned int block_size(struct block_device *bdev) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1150 | { | 
 | 1151 | 	return bdev->bd_block_size; | 
 | 1152 | } | 
 | 1153 |  | 
| shaohua.li@intel.com | f387693 | 2011-05-06 11:34:32 -0600 | [diff] [blame] | 1154 | static inline bool queue_flush_queueable(struct request_queue *q) | 
 | 1155 | { | 
 | 1156 | 	return !q->flush_not_queueable; | 
 | 1157 | } | 
 | 1158 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1159 | typedef struct {struct page *v;} Sector; | 
 | 1160 |  | 
 | 1161 | unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *); | 
 | 1162 |  | 
 | 1163 | static inline void put_dev_sector(Sector p) | 
 | 1164 | { | 
 | 1165 | 	page_cache_release(p.v); | 
 | 1166 | } | 
 | 1167 |  | 
 | 1168 | struct work_struct; | 
| Jens Axboe | 18887ad | 2008-07-28 13:08:45 +0200 | [diff] [blame] | 1169 | int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1170 |  | 
| Divyesh Shah | 9195291 | 2010-04-01 15:01:41 -0700 | [diff] [blame] | 1171 | #ifdef CONFIG_BLK_CGROUP | 
| Jens Axboe | 28f4197 | 2010-06-01 12:23:18 +0200 | [diff] [blame] | 1172 | /* | 
 | 1173 |  * This should not be using sched_clock(). A real patch is in progress | 
 | 1174 |  * to fix this up; until that is in place we need to disable preemption | 
 | 1175 |  * around sched_clock() in this function and set_io_start_time_ns(). | 
 | 1176 |  */ | 
| Divyesh Shah | 9195291 | 2010-04-01 15:01:41 -0700 | [diff] [blame] | 1177 | static inline void set_start_time_ns(struct request *req) | 
 | 1178 | { | 
| Jens Axboe | 28f4197 | 2010-06-01 12:23:18 +0200 | [diff] [blame] | 1179 | 	preempt_disable(); | 
| Divyesh Shah | 9195291 | 2010-04-01 15:01:41 -0700 | [diff] [blame] | 1180 | 	req->start_time_ns = sched_clock(); | 
| Jens Axboe | 28f4197 | 2010-06-01 12:23:18 +0200 | [diff] [blame] | 1181 | 	preempt_enable(); | 
| Divyesh Shah | 9195291 | 2010-04-01 15:01:41 -0700 | [diff] [blame] | 1182 | } | 
 | 1183 |  | 
 | 1184 | static inline void set_io_start_time_ns(struct request *req) | 
 | 1185 | { | 
| Jens Axboe | 28f4197 | 2010-06-01 12:23:18 +0200 | [diff] [blame] | 1186 | 	preempt_disable(); | 
| Divyesh Shah | 9195291 | 2010-04-01 15:01:41 -0700 | [diff] [blame] | 1187 | 	req->io_start_time_ns = sched_clock(); | 
| Jens Axboe | 28f4197 | 2010-06-01 12:23:18 +0200 | [diff] [blame] | 1188 | 	preempt_enable(); | 
| Divyesh Shah | 9195291 | 2010-04-01 15:01:41 -0700 | [diff] [blame] | 1189 | } | 
| Divyesh Shah | 84c124d | 2010-04-09 08:31:19 +0200 | [diff] [blame] | 1190 |  | 
 | 1191 | static inline uint64_t rq_start_time_ns(struct request *req) | 
 | 1192 | { | 
 | 1193 | 	return req->start_time_ns; | 
 | 1194 | } | 
 | 1195 |  | 
 | 1196 | static inline uint64_t rq_io_start_time_ns(struct request *req) | 
 | 1197 | { | 
 | 1198 | 	return req->io_start_time_ns; | 
 | 1199 | } | 
| Divyesh Shah | 9195291 | 2010-04-01 15:01:41 -0700 | [diff] [blame] | 1200 | #else | 
 | 1201 | static inline void set_start_time_ns(struct request *req) {} | 
 | 1202 | static inline void set_io_start_time_ns(struct request *req) {} | 
| Divyesh Shah | 84c124d | 2010-04-09 08:31:19 +0200 | [diff] [blame] | 1203 | static inline uint64_t rq_start_time_ns(struct request *req) | 
 | 1204 | { | 
 | 1205 | 	return 0; | 
 | 1206 | } | 
 | 1207 | static inline uint64_t rq_io_start_time_ns(struct request *req) | 
 | 1208 | { | 
 | 1209 | 	return 0; | 
 | 1210 | } | 
| Divyesh Shah | 9195291 | 2010-04-01 15:01:41 -0700 | [diff] [blame] | 1211 | #endif | 
 | 1212 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1213 | #define MODULE_ALIAS_BLOCKDEV(major,minor) \ | 
 | 1214 | 	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) | 
 | 1215 | #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ | 
 | 1216 | 	MODULE_ALIAS("block-major-" __stringify(major) "-*") | 
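/*
 * Editor's sketch: a driver that owns block major 240 (a hypothetical
 * example value) could declare
 *
 *	MODULE_ALIAS_BLOCKDEV_MAJOR(240);
 *
 * which expands to MODULE_ALIAS("block-major-240-*") and lets module
 * autoloading map device numbers back to the module.
 */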
 | 1217 |  | 
| Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1218 | #if defined(CONFIG_BLK_DEV_INTEGRITY) | 
 | 1219 |  | 
| Jens Axboe | b24498d | 2008-06-27 09:12:09 +0200 | [diff] [blame] | 1220 | #define INTEGRITY_FLAG_READ	2	/* verify data integrity on read */ | 
 | 1221 | #define INTEGRITY_FLAG_WRITE	4	/* generate data integrity on write */ | 
| Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1222 |  | 
 | 1223 | struct blk_integrity_exchg { | 
 | 1224 | 	void			*prot_buf; | 
 | 1225 | 	void			*data_buf; | 
 | 1226 | 	sector_t		sector; | 
 | 1227 | 	unsigned int		data_size; | 
 | 1228 | 	unsigned short		sector_size; | 
 | 1229 | 	const char		*disk_name; | 
 | 1230 | }; | 
 | 1231 |  | 
 | 1232 | typedef void (integrity_gen_fn) (struct blk_integrity_exchg *); | 
 | 1233 | typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *); | 
 | 1234 | typedef void (integrity_set_tag_fn) (void *, void *, unsigned int); | 
 | 1235 | typedef void (integrity_get_tag_fn) (void *, void *, unsigned int); | 
 | 1236 |  | 
 | 1237 | struct blk_integrity { | 
 | 1238 | 	integrity_gen_fn	*generate_fn; | 
 | 1239 | 	integrity_vrfy_fn	*verify_fn; | 
 | 1240 | 	integrity_set_tag_fn	*set_tag_fn; | 
 | 1241 | 	integrity_get_tag_fn	*get_tag_fn; | 
 | 1242 |  | 
 | 1243 | 	unsigned short		flags; | 
 | 1244 | 	unsigned short		tuple_size; | 
 | 1245 | 	unsigned short		sector_size; | 
 | 1246 | 	unsigned short		tag_size; | 
 | 1247 |  | 
 | 1248 | 	const char		*name; | 
 | 1249 |  | 
 | 1250 | 	struct kobject		kobj; | 
 | 1251 | }; | 
 | 1252 |  | 
| Mike Snitzer | a63a5cf | 2011-04-01 21:02:31 +0200 | [diff] [blame] | 1253 | extern bool blk_integrity_is_initialized(struct gendisk *); | 
| Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1254 | extern int blk_integrity_register(struct gendisk *, struct blk_integrity *); | 
 | 1255 | extern void blk_integrity_unregister(struct gendisk *); | 
| Martin K. Petersen | ad7fce9 | 2008-10-01 03:38:39 -0400 | [diff] [blame] | 1256 | extern int blk_integrity_compare(struct gendisk *, struct gendisk *); | 
| Martin K. Petersen | 13f05c8 | 2010-09-10 20:50:10 +0200 | [diff] [blame] | 1257 | extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, | 
 | 1258 | 				   struct scatterlist *); | 
 | 1259 | extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); | 
 | 1260 | extern int blk_integrity_merge_rq(struct request_queue *, struct request *, | 
 | 1261 | 				  struct request *); | 
 | 1262 | extern int blk_integrity_merge_bio(struct request_queue *, struct request *, | 
 | 1263 | 				   struct bio *); | 
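/*
 * Editor's sketch: registering a hypothetical integrity profile.  The
 * callback bodies, the "sketch-dif" name and the 8-byte tuple size are
 * illustrative assumptions; real profiles (e.g. T10 DIF in the SCSI
 * disk driver) fill in actual generate/verify logic.
 */
static void sketch_generate(struct blk_integrity_exchg *bix)
{
	/* compute protection tuples for bix->data_buf into bix->prot_buf */
}

static int sketch_verify(struct blk_integrity_exchg *bix)
{
	return 0;	/* 0 on success, negative errno on mismatch */
}

static struct blk_integrity sketch_integrity = {
	.name		= "sketch-dif",
	.generate_fn	= sketch_generate,
	.verify_fn	= sketch_verify,
	.tuple_size	= 8,	/* bytes of protection info per sector */
};

/* ... then, for a given struct gendisk *disk:
 *	blk_integrity_register(disk, &sketch_integrity);
 */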
| Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1264 |  | 
| Jens Axboe | b04accc | 2008-10-02 12:53:22 +0200 | [diff] [blame] | 1265 | static inline | 
 | 1266 | struct blk_integrity *bdev_get_integrity(struct block_device *bdev) | 
 | 1267 | { | 
 | 1268 | 	return bdev->bd_disk->integrity; | 
 | 1269 | } | 
 | 1270 |  | 
| Martin K. Petersen | b02739b | 2008-10-02 18:47:49 +0200 | [diff] [blame] | 1271 | static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) | 
 | 1272 | { | 
 | 1273 | 	return disk->integrity; | 
 | 1274 | } | 
 | 1275 |  | 
| Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1276 | static inline int blk_integrity_rq(struct request *rq) | 
 | 1277 | { | 
| Martin K. Petersen | d442cc4 | 2008-07-16 16:09:06 -0400 | [diff] [blame] | 1278 | 	if (rq->bio == NULL) | 
 | 1279 | 		return 0; | 
 | 1280 |  | 
| Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1281 | 	return bio_integrity(rq->bio); | 
 | 1282 | } | 
 | 1283 |  | 
| Martin K. Petersen | 13f05c8 | 2010-09-10 20:50:10 +0200 | [diff] [blame] | 1284 | static inline void blk_queue_max_integrity_segments(struct request_queue *q, | 
 | 1285 | 						    unsigned int segs) | 
 | 1286 | { | 
 | 1287 | 	q->limits.max_integrity_segments = segs; | 
 | 1288 | } | 
 | 1289 |  | 
 | 1290 | static inline unsigned short | 
 | 1291 | queue_max_integrity_segments(struct request_queue *q) | 
 | 1292 | { | 
 | 1293 | 	return q->limits.max_integrity_segments; | 
 | 1294 | } | 
 | 1295 |  | 
| Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1296 | #else /* CONFIG_BLK_DEV_INTEGRITY */ | 
 | 1297 |  | 
| Stephen Rothwell | fd83240 | 2012-01-12 09:17:30 +0100 | [diff] [blame] | 1298 | struct bio; | 
 | 1299 | struct block_device; | 
 | 1300 | struct gendisk; | 
 | 1301 | struct blk_integrity; | 
 | 1302 |  | 
 | 1303 | static inline int blk_integrity_rq(struct request *rq) | 
 | 1304 | { | 
 | 1305 | 	return 0; | 
 | 1306 | } | 
 | 1307 | static inline int blk_rq_count_integrity_sg(struct request_queue *q, | 
 | 1308 | 					    struct bio *b) | 
 | 1309 | { | 
 | 1310 | 	return 0; | 
 | 1311 | } | 
 | 1312 | static inline int blk_rq_map_integrity_sg(struct request_queue *q, | 
 | 1313 | 					  struct bio *b, | 
 | 1314 | 					  struct scatterlist *s) | 
 | 1315 | { | 
 | 1316 | 	return 0; | 
 | 1317 | } | 
 | 1318 | static inline struct blk_integrity *bdev_get_integrity(struct block_device *b) | 
 | 1319 | { | 
 | 1320 | 	return NULL; | 
 | 1321 | } | 
 | 1322 | static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) | 
 | 1323 | { | 
 | 1324 | 	return NULL; | 
 | 1325 | } | 
 | 1326 | static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b) | 
 | 1327 | { | 
 | 1328 | 	return 0; | 
 | 1329 | } | 
 | 1330 | static inline int blk_integrity_register(struct gendisk *d, | 
 | 1331 | 					 struct blk_integrity *b) | 
 | 1332 | { | 
 | 1333 | 	return 0; | 
 | 1334 | } | 
 | 1335 | static inline void blk_integrity_unregister(struct gendisk *d) | 
 | 1336 | { | 
 | 1337 | } | 
 | 1338 | static inline void blk_queue_max_integrity_segments(struct request_queue *q, | 
 | 1339 | 						    unsigned int segs) | 
 | 1340 | { | 
 | 1341 | } | 
 | 1342 | static inline unsigned short queue_max_integrity_segments(struct request_queue *q) | 
 | 1343 | { | 
 | 1344 | 	return 0; | 
 | 1345 | } | 
 | 1346 | static inline int blk_integrity_merge_rq(struct request_queue *rq, | 
 | 1347 | 					 struct request *r1, | 
 | 1348 | 					 struct request *r2) | 
 | 1349 | { | 
 | 1350 | 	return 0; | 
 | 1351 | } | 
 | 1352 | static inline int blk_integrity_merge_bio(struct request_queue *rq, | 
 | 1353 | 					  struct request *r, | 
 | 1354 | 					  struct bio *b) | 
 | 1355 | { | 
 | 1356 | 	return 0; | 
 | 1357 | } | 
 | 1358 | static inline bool blk_integrity_is_initialized(struct gendisk *g) | 
 | 1359 | { | 
 | 1360 | 	return false; | 
 | 1361 | } | 
| Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1362 |  | 
 | 1363 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ | 
 | 1364 |  | 
| Al Viro | 08f8585 | 2007-10-08 13:26:20 -0400 | [diff] [blame] | 1365 | struct block_device_operations { | 
| Al Viro | d4430d6 | 2008-03-02 09:09:22 -0500 | [diff] [blame] | 1366 | 	int (*open) (struct block_device *, fmode_t); | 
 | 1367 | 	int (*release) (struct gendisk *, fmode_t); | 
| Al Viro | d4430d6 | 2008-03-02 09:09:22 -0500 | [diff] [blame] | 1368 | 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); | 
 | 1369 | 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); | 
| Al Viro | 08f8585 | 2007-10-08 13:26:20 -0400 | [diff] [blame] | 1370 | 	int (*direct_access) (struct block_device *, sector_t, | 
 | 1371 | 						void **, unsigned long *); | 
| Tejun Heo | 77ea887 | 2010-12-08 20:57:37 +0100 | [diff] [blame] | 1372 | 	unsigned int (*check_events) (struct gendisk *disk, | 
 | 1373 | 				      unsigned int clearing); | 
 | 1374 | 	/* ->media_changed() is DEPRECATED, use ->check_events() instead */ | 
| Al Viro | 08f8585 | 2007-10-08 13:26:20 -0400 | [diff] [blame] | 1375 | 	int (*media_changed) (struct gendisk *); | 
| Tejun Heo | c3e33e0 | 2010-05-15 20:09:29 +0200 | [diff] [blame] | 1376 | 	void (*unlock_native_capacity) (struct gendisk *); | 
| Al Viro | 08f8585 | 2007-10-08 13:26:20 -0400 | [diff] [blame] | 1377 | 	int (*revalidate_disk) (struct gendisk *); | 
 | 1378 | 	int (*getgeo)(struct block_device *, struct hd_geometry *); | 
| Nitin Gupta | b3a27d0 | 2010-05-17 11:02:43 +0530 | [diff] [blame] | 1379 | 	/* this callback is with swap_lock and sometimes page table lock held */ | 
 | 1380 | 	void (*swap_slot_free_notify) (struct block_device *, unsigned long); | 
| Al Viro | 08f8585 | 2007-10-08 13:26:20 -0400 | [diff] [blame] | 1381 | 	struct module *owner; | 
 | 1382 | }; | 
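/*
 * Editor's sketch: a minimal block_device_operations.  The sketch_*
 * callbacks are hypothetical; a driver only sets the members it
 * implements and leaves the rest NULL.
 */
static int sketch_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static int sketch_release(struct gendisk *disk, fmode_t mode)
{
	return 0;
}

static const struct block_device_operations sketch_bdops = {
	.owner		= THIS_MODULE,
	.open		= sketch_open,
	.release	= sketch_release,
};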
 | 1383 |  | 
| Al Viro | 633a08b | 2007-08-29 20:34:12 -0400 | [diff] [blame] | 1384 | extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, | 
 | 1385 | 				 unsigned long); | 
| David Howells | 9361401 | 2006-09-30 20:45:40 +0200 | [diff] [blame] | 1386 | #else /* CONFIG_BLOCK */ | 
 | 1387 | /* | 
 | 1388 |  * stubs for when the block layer is configured out | 
 | 1389 |  */ | 
 | 1390 | #define buffer_heads_over_limit 0 | 
 | 1391 |  | 
| David Howells | 9361401 | 2006-09-30 20:45:40 +0200 | [diff] [blame] | 1392 | static inline long nr_blockdev_pages(void) | 
 | 1393 | { | 
 | 1394 | 	return 0; | 
 | 1395 | } | 
 | 1396 |  | 
| Jens Axboe | 1f940bd | 2011-03-11 20:17:08 +0100 | [diff] [blame] | 1397 | struct blk_plug { | 
 | 1398 | }; | 
 | 1399 |  | 
 | 1400 | static inline void blk_start_plug(struct blk_plug *plug) | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1401 | { | 
 | 1402 | } | 
 | 1403 |  | 
| Jens Axboe | 1f940bd | 2011-03-11 20:17:08 +0100 | [diff] [blame] | 1404 | static inline void blk_finish_plug(struct blk_plug *plug) | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1405 | { | 
 | 1406 | } | 
 | 1407 |  | 
| Jens Axboe | 1f940bd | 2011-03-11 20:17:08 +0100 | [diff] [blame] | 1408 | static inline void blk_flush_plug(struct task_struct *task) | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1409 | { | 
 | 1410 | } | 
 | 1411 |  | 
| Jens Axboe | a237c1c | 2011-04-16 13:27:55 +0200 | [diff] [blame] | 1412 | static inline void blk_schedule_flush_plug(struct task_struct *task) | 
 | 1413 | { | 
 | 1414 | } | 
 | 1415 |  | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1417 | static inline bool blk_needs_flush_plug(struct task_struct *tsk) | 
 | 1418 | { | 
 | 1419 | 	return false; | 
 | 1420 | } | 
 | 1421 |  | 
| David Howells | 9361401 | 2006-09-30 20:45:40 +0200 | [diff] [blame] | 1422 | #endif /* CONFIG_BLOCK */ | 
 | 1423 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1424 | #endif |