#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>

#include <asm/scatterlist.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		2

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

#define BLK_RL_SYNCFULL		(1U << 0)
#define BLK_RL_ASYNCFULL	(1U << 1)

struct request_list {
	struct request_queue	*q;	/* the queue this rl belongs to */
#ifdef CONFIG_BLK_CGROUP
	struct blkcg_gq		*blkg;	/* blkg this request pool belongs to */
#endif
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int			count[2];
	int			starved[2];
	mempool_t		*rq_pool;
	wait_queue_head_t	wait[2];
	unsigned int		flags;
};
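
/*
 * Illustrative sketch (not part of this header): block core accounts
 * allocated requests per direction through the BLK_RW_SYNC/BLK_RW_ASYNC
 * indices, roughly along these lines:
 *
 *	const bool is_sync = rw_is_sync(rw_flags) != 0;
 *
 *	rl->count[is_sync]++;
 *	if (rl->count[is_sync] >= q->nr_requests)
 *		blk_set_rl_full(rl, is_sync);
 */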

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_SENSE,			/* sense request */
	REQ_TYPE_PM_SUSPEND,		/* suspend request */
	REQ_TYPE_PM_RESUME,		/* resume request */
	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
	REQ_TYPE_SPECIAL,		/* driver defined type */
	/*
	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
	 * private REQ_LB opcodes to differentiate what type of request this is
	 */
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};

#define BLK_MAX_CDB	16

/*
 * try to put the fields that are referenced together in the same cacheline.
 * if you modify this structure, be sure to check block/blk-core.c:blk_rq_init()
 * as well!
 */
struct request {
	struct list_head queuelist;
	struct call_single_data csd;

	struct request_queue *q;

	unsigned int cmd_flags;
	enum rq_cmd_type_bits cmd_type;
	unsigned long atomic_flags;

	int cpu;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	struct hlist_node hash;	/* merge hash */
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.  Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq		*icq;
			void			*priv[2];
		} elv;

		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
	struct request_list *rl;		/* rl this rq is alloced from */
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;    /* when passed to hardware */
#endif
	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	int ref_count;

	void *special;		/* opaque pointer available for LLD use */
	char *buffer;		/* kaddr of the current segment if available */

	int tag;
	int errors;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;
	unsigned short cmd_len;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};
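
/*
 * Illustrative sketch (not part of this header): the __data_len and
 * __sector fields above are internal; callers should go through the
 * accessors declared near the bottom of this file instead, e.g.:
 *
 *	sector_t pos = blk_rq_pos(rq);		(not rq->__sector)
 *	unsigned int len = blk_rq_bytes(rq);	(not rq->__data_len)
 */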

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests. Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int	pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32	pm_state;
	void	*data;		/* for driver use */
};

#include <linux/elevator.h>

typedef void (request_fn_proc) (struct request_queue *q);
typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
struct bvec_merge_data {
	struct block_device *bi_bdev;
	sector_t bi_sector;
	unsigned bi_size;
	unsigned long bi_rw;
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
			     struct bio_vec *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};
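
/*
 * Illustrative sketch (not part of this header): a driver's timeout
 * handler maps its recovery outcome onto the enum values above, e.g.:
 *
 *	static enum blk_eh_timer_return mydrv_timed_out(struct request *rq)
 *	{
 *		if (mydrv_try_recover(rq))	(hypothetical helper)
 *			return BLK_EH_RESET_TIMER;	(grant more time)
 *		return BLK_EH_NOT_HANDLED;	(let blk error handling run)
 *	}
 */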

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_write_same_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;

	unsigned short		logical_block_size;
	unsigned short		max_segments;
	unsigned short		max_integrity_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		cluster;
	unsigned char		discard_zeroes_data;
};
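
/*
 * Illustrative sketch (not part of this header): a driver exposing a
 * 4KiB-sector device striped in 64KiB chunks would describe it roughly
 * as below; in practice the blk_queue_*() setters in
 * block/blk-settings.c are used rather than writing the fields directly:
 *
 *	q->limits.logical_block_size  = 4096;
 *	q->limits.physical_block_size = 4096;
 *	q->limits.io_min = 4096;	(smallest I/O without penalty)
 *	q->limits.io_opt = 65536;	(preferred I/O granularity)
 */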

struct request_queue {
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	struct elevator_queue	*elevator;
	int			nr_rqs[2];	/* # allocated [a]sync rqs */
	int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */

	/*
	 * If blkcg is not used, @q->root_rl serves all requests.  If blkcg
	 * is used, root blkg allocates from @q->root_rl and all other
	 * blkgs from their own blkg->rl.  Which one to use should be
	 * determined using bio_request_list().
	 */
	struct request_list	root_rl;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unprep_rq_fn		*unprep_rq_fn;
	merge_bvec_fn		*merge_bvec_fn;
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Delayed queue handling
	 */
	struct delayed_work	delay_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

#ifdef CONFIG_PM_RUNTIME
	struct device		*dev;
	int			rpm_status;
	unsigned int		nr_pending;
#endif

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		dma_drain_size;
	void			*dma_drain_buffer;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight[2];
	/*
	 * Number of active block driver functions for which blk_drain_queue()
	 * must wait. Must be incremented around functions that unlock the
	 * queue_lock internally, e.g. scsi_request_fn().
	 */
	unsigned int		request_fn_active;

	unsigned int		rq_timeout;
	struct timer_list	timeout;
	struct list_head	timeout_list;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
#endif

	struct queue_limits	limits;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	unsigned int		flush_flags;
	unsigned int		flush_not_queueable:1;
	unsigned int		flush_queue_delayed:1;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		flush_rq;

	struct mutex		sysfs_lock;

	int			bypass_depth;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn		*bsg_job_fn;
	int			bsg_job_size;
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_CGROUP
	struct list_head	all_q_node;
#endif
#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head		rcu_head;
};

#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define	QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DYING	5	/* queue being torn down */
#define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES     8	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO     10	/* fake timeout */
#define QUEUE_FLAG_STACKABLE   11	/* supports request stacking */
#define QUEUE_FLAG_NONROT      12	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT     13	/* do IO stats */
#define QUEUE_FLAG_DISCARD     14	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES   15	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM  16	/* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD  17	/* supports SECDISCARD */
#define QUEUE_FLAG_SAME_FORCE  18	/* force complete on same CPU */
#define QUEUE_FLAG_DEAD        19	/* queue tear-down finished */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}
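
/*
 * Illustrative sketch (not part of this header): the _unlocked variants
 * skip the lockdep assertion and are intended for contexts where the
 * queue is not yet (or no longer) visible to others; once it is live,
 * manipulate flags under queue_lock:
 *
 *	spin_lock_irq(q->queue_lock);
 *	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 *	spin_unlock_irq(q->queue_lock);
 */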

#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secdiscard(q)	(blk_queue_discard(q) && \
	test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
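
/*
 * Illustrative sketch (not part of this header): these predicates are
 * plain test_bit() reads, so no queue_lock is needed; e.g. an upper
 * layer can gate discard support with:
 *
 *	if (blk_queue_discard(bdev_get_queue(bdev)))
 *		... build and submit REQ_DISCARD bios ...
 */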
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 553 |  | 
| Christoph Hellwig | 33659eb | 2010-08-07 18:17:56 +0200 | [diff] [blame] | 554 | #define blk_noretry_request(rq) \ | 
 | 555 | 	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ | 
 | 556 | 			     REQ_FAILFAST_DRIVER)) | 
| Jens Axboe | 4aff5e2 | 2006-08-10 08:44:47 +0200 | [diff] [blame] | 557 |  | 
| Christoph Hellwig | 33659eb | 2010-08-07 18:17:56 +0200 | [diff] [blame] | 558 | #define blk_account_rq(rq) \ | 
 | 559 | 	(((rq)->cmd_flags & REQ_STARTED) && \ | 
| Martin K. Petersen | e2a60da | 2012-09-18 12:19:25 -0400 | [diff] [blame] | 560 | 	 ((rq)->cmd_type == REQ_TYPE_FS)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 561 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 562 | #define blk_pm_request(rq)	\ | 
| Christoph Hellwig | 33659eb | 2010-08-07 18:17:56 +0200 | [diff] [blame] | 563 | 	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \ | 
 | 564 | 	 (rq)->cmd_type == REQ_TYPE_PM_RESUME) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 565 |  | 
| Jens Axboe | ab780f1 | 2008-08-26 10:25:02 +0200 | [diff] [blame] | 566 | #define blk_rq_cpu_valid(rq)	((rq)->cpu != -1) | 
| FUJITA Tomonori | abae1fd | 2007-07-16 08:52:14 +0200 | [diff] [blame] | 567 | #define blk_bidi_rq(rq)		((rq)->next_rq != NULL) | 
| Kiyoshi Ueda | 336cdb4 | 2007-12-11 17:40:30 -0500 | [diff] [blame] | 568 | /* rq->queuelist of dequeued request must be list_empty() */ | 
 | 569 | #define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist)) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 570 |  | 
 | 571 | #define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist) | 
 | 572 |  | 
| Jens Axboe | 4aff5e2 | 2006-08-10 08:44:47 +0200 | [diff] [blame] | 573 | #define rq_data_dir(rq)		((rq)->cmd_flags & 1) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 574 |  | 
| Martin K. Petersen | e692cb6 | 2010-12-01 19:41:49 +0100 | [diff] [blame] | 575 | static inline unsigned int blk_queue_cluster(struct request_queue *q) | 
 | 576 | { | 
 | 577 | 	return q->limits.cluster; | 
 | 578 | } | 
 | 579 |  | 
| Jens Axboe | 9e2585a | 2006-07-28 09:26:13 +0200 | [diff] [blame] | 580 | /* | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 581 |  * We regard a request as sync, if either a read or a sync write | 
| Jens Axboe | 9e2585a | 2006-07-28 09:26:13 +0200 | [diff] [blame] | 582 |  */ | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 583 | static inline bool rw_is_sync(unsigned int rw_flags) | 
 | 584 | { | 
| Christoph Hellwig | 7b6d91d | 2010-08-07 18:20:39 +0200 | [diff] [blame] | 585 | 	return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC); | 
| Jens Axboe | 1faa16d | 2009-04-06 14:48:01 +0200 | [diff] [blame] | 586 | } | 
 | 587 |  | 
 | 588 | static inline bool rq_is_sync(struct request *rq) | 
 | 589 | { | 
 | 590 | 	return rw_is_sync(rq->cmd_flags); | 
 | 591 | } | 
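
/*
 * Illustrative sketch (not part of this header): reads are always
 * treated as sync, writes only when REQ_SYNC is set, so:
 *
 *	rw_is_sync(READ)		evaluates true
 *	rw_is_sync(WRITE)		evaluates false
 *	rw_is_sync(WRITE | REQ_SYNC)	evaluates true
 */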

static inline bool blk_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	return rl->flags & flag;
}

static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags |= flag;
}

static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags &= ~flag;
}

static inline bool rq_mergeable(struct request *rq)
{
	if (rq->cmd_type != REQ_TYPE_FS)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline bool blk_check_merge_flags(unsigned int flags1,
					 unsigned int flags2)
{
	if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD))
		return false;

	if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
		return false;

	if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
		return false;

	return true;
}

static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	if (bio_data(a) == bio_data(b))
		return true;

	return false;
}

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */
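
/*
 * Illustrative sketch (not part of this header): a prep_rq_fn maps its
 * resource and validity checks onto these codes, e.g.:
 *
 *	static int mydrv_prep_fn(struct request_queue *q, struct request *rq)
 *	{
 *		if (!mydrv_resources_ready(q))	(hypothetical helper)
 *			return BLKPREP_DEFER;	(leave queued, retry later)
 *		if (mydrv_rq_invalid(rq))	(hypothetical helper)
 *			return BLKPREP_KILL;	(fail the request)
 *		return BLKPREP_OK;
 *	}
 */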

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	int i;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.i)

#define rq_iter_last(rq, _iter)					\
		(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
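
/*
 * Illustrative sketch (not part of this header): walking every bio_vec
 * segment of a request looks like:
 *
 *	struct req_iterator iter;
 *	struct bio_vec *bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter)
 *		pr_debug("segment: page %p len %u off %u\n",
 *			 bvec->bv_page, bvec->bv_len, bvec->bv_offset);
 */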

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern struct request *blk_make_request(struct request_queue *, struct bio *,
					gfp_t);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
		unsigned int len);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

extern void blk_queue_bio(struct request_queue *q, struct bio *bio);

/*
 * A queue has just exited congestion.  Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
{
	clear_bdi_congested(&q->backing_dev_info, sync);
}

/*
 * A queue has just entered congestion.  Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(struct request_queue *q, int sync)
{
	set_bdi_congested(&q->backing_dev_info, sync);
}

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, struct sg_iovec *, int,
			       unsigned int, gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
| Mike Christie | 6e39b69 | 2005-11-11 05:30:24 -0600 | [diff] [blame] | 802 |  | 
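/*
 * Pass-through sketch (illustrative, along the lines of the SG_IO path):
 * map a user buffer into a request and execute it synchronously. Error
 * handling is omitted; "ubuf" and "len" are hypothetical.
 *
 *	rq = blk_get_request(q, READ, GFP_KERNEL);
 *	blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	bio = rq->bio;		(save it; rq->bio may be cleared on completion)
 *	blk_execute_rq(q, bdev->bd_disk, rq, 0);
 *	blk_rq_unmap_user(bio);
 *	blk_put_request(rq);
 */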
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 803 | static inline struct request_queue *bdev_get_queue(struct block_device *bdev) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 804 | { | 
 | 805 | 	return bdev->bd_disk->queue; | 
 | 806 | } | 
 | 807 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 808 | /* | 
| Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 809 |  * blk_rq_pos()			: the current sector | 
 | 810 |  * blk_rq_bytes()		: bytes left in the entire request | 
 | 811 |  * blk_rq_cur_bytes()		: bytes left in the current segment | 
 | 812 |  * blk_rq_err_bytes()		: bytes left till the next error boundary | 
 | 813 |  * blk_rq_sectors()		: sectors left in the entire request | 
 | 814 |  * blk_rq_cur_sectors()		: sectors left in the current segment | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 815 |  */ | 
| Tejun Heo | 5b93629 | 2009-05-07 22:24:38 +0900 | [diff] [blame] | 816 | static inline sector_t blk_rq_pos(const struct request *rq) | 
 | 817 | { | 
| Tejun Heo | a2dec7b | 2009-05-07 22:24:44 +0900 | [diff] [blame] | 818 | 	return rq->__sector; | 
| Tejun Heo | 5b93629 | 2009-05-07 22:24:38 +0900 | [diff] [blame] | 819 | } | 
 | 820 |  | 
| Tejun Heo | 2e46e8b | 2009-05-07 22:24:41 +0900 | [diff] [blame] | 821 | static inline unsigned int blk_rq_bytes(const struct request *rq) | 
 | 822 | { | 
| Tejun Heo | a2dec7b | 2009-05-07 22:24:44 +0900 | [diff] [blame] | 823 | 	return rq->__data_len; | 
| Tejun Heo | 2e46e8b | 2009-05-07 22:24:41 +0900 | [diff] [blame] | 824 | } | 
 | 825 |  | 
 | 826 | static inline int blk_rq_cur_bytes(const struct request *rq) | 
 | 827 | { | 
 | 828 | 	return rq->bio ? bio_cur_bytes(rq->bio) : 0; | 
 | 829 | } | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 830 |  | 
| Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 831 | extern unsigned int blk_rq_err_bytes(const struct request *rq); | 
 | 832 |  | 
| Tejun Heo | 5b93629 | 2009-05-07 22:24:38 +0900 | [diff] [blame] | 833 | static inline unsigned int blk_rq_sectors(const struct request *rq) | 
 | 834 | { | 
| Tejun Heo | 2e46e8b | 2009-05-07 22:24:41 +0900 | [diff] [blame] | 835 | 	return blk_rq_bytes(rq) >> 9; | 
| Tejun Heo | 5b93629 | 2009-05-07 22:24:38 +0900 | [diff] [blame] | 836 | } | 
 | 837 |  | 
 | 838 | static inline unsigned int blk_rq_cur_sectors(const struct request *rq) | 
 | 839 | { | 
| Tejun Heo | 2e46e8b | 2009-05-07 22:24:41 +0900 | [diff] [blame] | 840 | 	return blk_rq_cur_bytes(rq) >> 9; | 
| Tejun Heo | 5b93629 | 2009-05-07 22:24:38 +0900 | [diff] [blame] | 841 | } | 
 | 842 |  | 
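/*
 * Worked example (illustrative): a request positioned at sector 2048 with
 * 8192 bytes left in total and 4096 bytes left in the current segment gives
 * blk_rq_pos() == 2048, blk_rq_bytes() == 8192, blk_rq_sectors() == 16,
 * blk_rq_cur_bytes() == 4096 and blk_rq_cur_sectors() == 8 (the >> 9
 * shifts assume 512-byte sectors).
 */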
| Martin K. Petersen | f31dc1c | 2012-09-18 12:19:26 -0400 | [diff] [blame] | 843 | static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, | 
 | 844 | 						     unsigned int cmd_flags) | 
 | 845 | { | 
 | 846 | 	if (unlikely(cmd_flags & REQ_DISCARD)) | 
| James Bottomley | 871dd92 | 2013-04-24 08:52:50 -0600 | [diff] [blame] | 847 | 		return min(q->limits.max_discard_sectors, UINT_MAX >> 9); | 
| Martin K. Petersen | f31dc1c | 2012-09-18 12:19:26 -0400 | [diff] [blame] | 848 |  | 
| Martin K. Petersen | 4363ac7 | 2012-09-18 12:19:27 -0400 | [diff] [blame] | 849 | 	if (unlikely(cmd_flags & REQ_WRITE_SAME)) | 
 | 850 | 		return q->limits.max_write_same_sectors; | 
 | 851 |  | 
| Martin K. Petersen | f31dc1c | 2012-09-18 12:19:26 -0400 | [diff] [blame] | 852 | 	return q->limits.max_sectors; | 
 | 853 | } | 
 | 854 |  | 
 | 855 | static inline unsigned int blk_rq_get_max_sectors(struct request *rq) | 
 | 856 | { | 
 | 857 | 	struct request_queue *q = rq->q; | 
 | 858 |  | 
 | 859 | 	if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC)) | 
 | 860 | 		return q->limits.max_hw_sectors; | 
 | 861 |  | 
 | 862 | 	return blk_queue_get_max_sectors(q, rq->cmd_flags); | 
 | 863 | } | 
 | 864 |  | 
| Tejun Heo | 5efccd1 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 865 | /* | 
| Tejun Heo | 9934c8c | 2009-05-08 11:54:16 +0900 | [diff] [blame] | 866 |  * Request issue related functions. | 
 | 867 |  */ | 
 | 868 | extern struct request *blk_peek_request(struct request_queue *q); | 
 | 869 | extern void blk_start_request(struct request *rq); | 
 | 870 | extern struct request *blk_fetch_request(struct request_queue *q); | 
 | 871 |  | 
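/*
 * Issue-path sketch (illustrative, not a definitive driver): a request_fn
 * commonly drains the queue with blk_fetch_request(), which is equivalent
 * to blk_peek_request() followed by blk_start_request(). The queue lock is
 * held when the block core calls this. "my_hw_submit" is hypothetical.
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL)
 *			my_hw_submit(rq);
 *	}
 */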
 | 872 | /* | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 873 |  * Request completion related functions. | 
 | 874 |  * | 
 | 875 |  * blk_update_request() completes given number of bytes and updates | 
 | 876 |  * the request without completing it. | 
 | 877 |  * | 
| Tejun Heo | f06d9a2 | 2009-04-23 11:05:19 +0900 | [diff] [blame] | 878 |  * blk_end_request() and friends.  __blk_end_request() must be called | 
 | 879 |  * with the request queue spinlock acquired. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 880 |  * | 
 | 881 |  * Several drivers define their own end_request and call | 
| Kiyoshi Ueda | 3bcddea | 2007-12-11 17:52:28 -0500 | [diff] [blame] | 882 |  * blk_end_request() for the common parts of the work, | 
 | 883 |  * which prevents code duplication in drivers. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 884 |  */ | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 885 | extern bool blk_update_request(struct request *rq, int error, | 
 | 886 | 			       unsigned int nr_bytes); | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 887 | extern bool blk_end_request(struct request *rq, int error, | 
 | 888 | 			    unsigned int nr_bytes); | 
 | 889 | extern void blk_end_request_all(struct request *rq, int error); | 
 | 890 | extern bool blk_end_request_cur(struct request *rq, int error); | 
| Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 891 | extern bool blk_end_request_err(struct request *rq, int error); | 
| FUJITA Tomonori | b1f7449 | 2009-05-11 17:56:09 +0900 | [diff] [blame] | 892 | extern bool __blk_end_request(struct request *rq, int error, | 
 | 893 | 			      unsigned int nr_bytes); | 
 | 894 | extern void __blk_end_request_all(struct request *rq, int error); | 
 | 895 | extern bool __blk_end_request_cur(struct request *rq, int error); | 
| Tejun Heo | 80a761f | 2009-07-03 17:48:17 +0900 | [diff] [blame] | 896 | extern bool __blk_end_request_err(struct request *rq, int error); | 
| Tejun Heo | 2e60e02 | 2009-04-23 11:05:18 +0900 | [diff] [blame] | 897 |  | 
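/*
 * Completion sketch (illustrative): finish a request wholesale with
 * blk_end_request_all(), or piecewise with blk_end_request(), which
 * returns true while bytes are still pending. "my_err" (0 or a negative
 * errno) and "done" are hypothetical.
 *
 *	if (blk_end_request(rq, my_err, done))
 *		return;		(request not finished, more bytes pending)
 *
 * or simply:
 *
 *	blk_end_request_all(rq, my_err);
 */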
| Jens Axboe | ff856ba | 2006-01-09 16:02:34 +0100 | [diff] [blame] | 898 | extern void blk_complete_request(struct request *); | 
| Jens Axboe | 242f9dc | 2008-09-14 05:55:09 -0700 | [diff] [blame] | 899 | extern void __blk_complete_request(struct request *); | 
 | 900 | extern void blk_abort_request(struct request *); | 
| James Bottomley | 28018c2 | 2010-07-01 19:49:17 +0900 | [diff] [blame] | 901 | extern void blk_unprep_request(struct request *); | 
| Jens Axboe | ff856ba | 2006-01-09 16:02:34 +0100 | [diff] [blame] | 902 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 903 | /* | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 904 |  * Access functions for manipulating queue properties | 
 | 905 |  */ | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 906 | extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn, | 
| Christoph Lameter | 1946089 | 2005-06-23 00:08:19 -0700 | [diff] [blame] | 907 | 					spinlock_t *lock, int node_id); | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 908 | extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *); | 
| Mike Snitzer | 01effb0 | 2010-05-11 08:57:42 +0200 | [diff] [blame] | 909 | extern struct request_queue *blk_init_allocated_queue(struct request_queue *, | 
 | 910 | 						      request_fn_proc *, spinlock_t *); | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 911 | extern void blk_cleanup_queue(struct request_queue *); | 
 | 912 | extern void blk_queue_make_request(struct request_queue *, make_request_fn *); | 
 | 913 | extern void blk_queue_bounce_limit(struct request_queue *, u64); | 
| Mike Snitzer | 72d4cd9 | 2010-12-17 08:34:20 +0100 | [diff] [blame] | 914 | extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int); | 
| Martin K. Petersen | 086fa5f | 2010-02-26 00:20:38 -0500 | [diff] [blame] | 915 | extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); | 
| Martin K. Petersen | 8a78362 | 2010-02-26 00:20:39 -0500 | [diff] [blame] | 916 | extern void blk_queue_max_segments(struct request_queue *, unsigned short); | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 917 | extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); | 
| Christoph Hellwig | 67efc92 | 2009-09-30 13:54:20 +0200 | [diff] [blame] | 918 | extern void blk_queue_max_discard_sectors(struct request_queue *q, | 
 | 919 | 		unsigned int max_discard_sectors); | 
| Martin K. Petersen | 4363ac7 | 2012-09-18 12:19:27 -0400 | [diff] [blame] | 920 | extern void blk_queue_max_write_same_sectors(struct request_queue *q, | 
 | 921 | 		unsigned int max_write_same_sectors); | 
| Martin K. Petersen | e1defc4 | 2009-05-22 17:17:49 -0400 | [diff] [blame] | 922 | extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); | 
| Martin K. Petersen | 892b6f9 | 2010-10-13 21:18:03 +0200 | [diff] [blame] | 923 | extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 924 | extern void blk_queue_alignment_offset(struct request_queue *q, | 
 | 925 | 				       unsigned int alignment); | 
| Martin K. Petersen | 7c958e3 | 2009-07-31 11:49:11 -0400 | [diff] [blame] | 926 | extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 927 | extern void blk_queue_io_min(struct request_queue *q, unsigned int min); | 
| Martin K. Petersen | 3c5820c | 2009-09-11 21:54:52 +0200 | [diff] [blame] | 928 | extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 929 | extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); | 
| Martin K. Petersen | e475bba | 2009-06-16 08:23:52 +0200 | [diff] [blame] | 930 | extern void blk_set_default_limits(struct queue_limits *lim); | 
| Martin K. Petersen | b1bd055 | 2012-01-11 16:27:11 +0100 | [diff] [blame] | 931 | extern void blk_set_stacking_limits(struct queue_limits *lim); | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 932 | extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, | 
 | 933 | 			    sector_t offset); | 
| Martin K. Petersen | 17be8c2 | 2010-01-11 03:21:49 -0500 | [diff] [blame] | 934 | extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, | 
 | 935 | 			    sector_t offset); | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 936 | extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, | 
 | 937 | 			      sector_t offset); | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 938 | extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); | 
| Tejun Heo | e3790c7 | 2008-03-04 11:18:17 +0100 | [diff] [blame] | 939 | extern void blk_queue_dma_pad(struct request_queue *, unsigned int); | 
| FUJITA Tomonori | 27f8221 | 2008-07-04 09:30:03 +0200 | [diff] [blame] | 940 | extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); | 
| Tejun Heo | 2fb98e8 | 2008-02-19 11:36:53 +0100 | [diff] [blame] | 941 | extern int blk_queue_dma_drain(struct request_queue *q, | 
 | 942 | 			       dma_drain_needed_fn *dma_drain_needed, | 
 | 943 | 			       void *buf, unsigned int size); | 
| Kiyoshi Ueda | ef9e3fa | 2008-10-01 16:12:15 +0200 | [diff] [blame] | 944 | extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn); | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 945 | extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); | 
 | 946 | extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); | 
| James Bottomley | 28018c2 | 2010-07-01 19:49:17 +0900 | [diff] [blame] | 947 | extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn); | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 948 | extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *); | 
 | 949 | extern void blk_queue_dma_alignment(struct request_queue *, int); | 
| James Bottomley | 11c3e68 | 2007-12-31 16:37:00 -0600 | [diff] [blame] | 950 | extern void blk_queue_update_dma_alignment(struct request_queue *, int); | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 951 | extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); | 
| Jens Axboe | 242f9dc | 2008-09-14 05:55:09 -0700 | [diff] [blame] | 952 | extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); | 
 | 953 | extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); | 
| Tejun Heo | 4913efe | 2010-09-03 11:56:16 +0200 | [diff] [blame] | 954 | extern void blk_queue_flush(struct request_queue *q, unsigned int flush); | 
| shaohua.li@intel.com | f387693 | 2011-05-06 11:34:32 -0600 | [diff] [blame] | 955 | extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 956 | extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 957 |  | 
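/*
 * Setup sketch (illustrative): a driver typically configures its limits
 * right after creating the queue. The values below are examples only.
 *
 *	q = blk_init_queue(my_request_fn, &my_lock);
 *	if (!q)
 *		return -ENOMEM;
 *	blk_queue_max_hw_sectors(q, 1024);
 *	blk_queue_max_segments(q, 128);
 *	blk_queue_logical_block_size(q, 512);
 */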
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 958 | extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); | 
| Asias He | 85b9f66 | 2012-08-02 23:42:04 +0200 | [diff] [blame] | 959 | extern int blk_bio_map_sg(struct request_queue *q, struct bio *bio, | 
 | 960 | 			  struct scatterlist *sglist); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 961 | extern void blk_dump_rq_flags(struct request *, char *); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 962 | extern long nr_blockdev_pages(void); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 963 |  | 
| Tejun Heo | 09ac46c | 2011-12-14 00:33:38 +0100 | [diff] [blame] | 964 | bool __must_check blk_get_queue(struct request_queue *); | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 965 | struct request_queue *blk_alloc_queue(gfp_t); | 
 | 966 | struct request_queue *blk_alloc_queue_node(gfp_t, int); | 
 | 967 | extern void blk_put_queue(struct request_queue *); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 968 |  | 
| Shaohua Li | 316cc67 | 2011-07-08 08:19:21 +0200 | [diff] [blame] | 969 | /* | 
| Lin Ming | 6c95466 | 2013-03-23 11:42:26 +0800 | [diff] [blame] | 970 |  * Block layer runtime PM functions | 
 | 971 |  */ | 
 | 972 | #ifdef CONFIG_PM_RUNTIME | 
 | 973 | extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev); | 
 | 974 | extern int blk_pre_runtime_suspend(struct request_queue *q); | 
 | 975 | extern void blk_post_runtime_suspend(struct request_queue *q, int err); | 
 | 976 | extern void blk_pre_runtime_resume(struct request_queue *q); | 
 | 977 | extern void blk_post_runtime_resume(struct request_queue *q, int err); | 
 | 978 | #else | 
 | 979 | static inline void blk_pm_runtime_init(struct request_queue *q, | 
 | 980 | 	struct device *dev) {} | 
 | 981 | static inline int blk_pre_runtime_suspend(struct request_queue *q) | 
 | 982 | { | 
 | 983 | 	return -ENOSYS; | 
 | 984 | } | 
 | 985 | static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {} | 
 | 986 | static inline void blk_pre_runtime_resume(struct request_queue *q) {} | 
 | 987 | static inline void blk_post_runtime_resume(struct request_queue *q, int err) {} | 
 | 988 | #endif | 
 | 989 |  | 
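/*
 * Runtime PM sketch (illustrative): a driver associates its queue with the
 * device once, after which the block core invokes the pre/post callbacks
 * around request activity. "dev" is the driver's struct device.
 *
 *	blk_pm_runtime_init(q, dev);
 */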
 | 990 | /* | 
| Suresh Jayaraman | 75df713 | 2011-09-21 10:00:16 +0200 | [diff] [blame] | 991 |  * blk_plug permits building a queue of related requests by holding the I/O | 
 | 992 |  * fragments for a short period. This allows merging of sequential requests | 
 | 993 |  * into a single larger request. As the requests are moved from a per-task list | 
 | 994 |  * to the device's request_queue in a batch, this results in improved scalability | 
 | 995 |  * as contention on the request_queue lock is reduced. | 
 | 996 |  * | 
 | 997 |  * It is ok not to disable preemption when adding the request to the plug list | 
 | 998 |  * or when attempting a merge, because blk_schedule_flush_plug() will only flush | 
 | 999 |  * the plug list when the task sleeps by itself. For details, please see | 
 | 1000 |  * schedule() where blk_schedule_flush_plug() is called. | 
| Shaohua Li | 316cc67 | 2011-07-08 08:19:21 +0200 | [diff] [blame] | 1001 |  */ | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1002 | struct blk_plug { | 
| Suresh Jayaraman | 75df713 | 2011-09-21 10:00:16 +0200 | [diff] [blame] | 1003 | 	unsigned long magic; /* detect uninitialized use */ | 
 | 1004 | 	struct list_head list; /* requests */ | 
 | 1005 | 	struct list_head cb_list; /* md requires an unplug callback */ | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1006 | }; | 
| Shaohua Li | 55c022b | 2011-07-08 08:19:20 +0200 | [diff] [blame] | 1007 | #define BLK_MAX_REQUEST_COUNT 16 | 
 | 1008 |  | 
| NeilBrown | 9cbb175 | 2012-07-31 09:08:14 +0200 | [diff] [blame] | 1009 | struct blk_plug_cb; | 
| NeilBrown | 74018dc | 2012-07-31 09:08:15 +0200 | [diff] [blame] | 1010 | typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); | 
| NeilBrown | 048c937 | 2011-04-18 09:52:22 +0200 | [diff] [blame] | 1011 | struct blk_plug_cb { | 
 | 1012 | 	struct list_head list; | 
| NeilBrown | 9cbb175 | 2012-07-31 09:08:14 +0200 | [diff] [blame] | 1013 | 	blk_plug_cb_fn callback; | 
 | 1014 | 	void *data; | 
| NeilBrown | 048c937 | 2011-04-18 09:52:22 +0200 | [diff] [blame] | 1015 | }; | 
| NeilBrown | 9cbb175 | 2012-07-31 09:08:14 +0200 | [diff] [blame] | 1016 | extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, | 
 | 1017 | 					     void *data, int size); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1018 | extern void blk_start_plug(struct blk_plug *); | 
 | 1019 | extern void blk_finish_plug(struct blk_plug *); | 
| Jens Axboe | f660378 | 2011-04-15 15:49:07 +0200 | [diff] [blame] | 1020 | extern void blk_flush_plug_list(struct blk_plug *, bool); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1021 |  | 
 | 1022 | static inline void blk_flush_plug(struct task_struct *tsk) | 
 | 1023 | { | 
 | 1024 | 	struct blk_plug *plug = tsk->plug; | 
 | 1025 |  | 
| Christoph Hellwig | 88b996c | 2011-04-15 15:20:10 +0200 | [diff] [blame] | 1026 | 	if (plug) | 
| Jens Axboe | a237c1c | 2011-04-16 13:27:55 +0200 | [diff] [blame] | 1027 | 		blk_flush_plug_list(plug, false); | 
 | 1028 | } | 
 | 1029 |  | 
 | 1030 | static inline void blk_schedule_flush_plug(struct task_struct *tsk) | 
 | 1031 | { | 
 | 1032 | 	struct blk_plug *plug = tsk->plug; | 
 | 1033 |  | 
 | 1034 | 	if (plug) | 
| Jens Axboe | f660378 | 2011-04-15 15:49:07 +0200 | [diff] [blame] | 1035 | 		blk_flush_plug_list(plug, true); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1036 | } | 
 | 1037 |  | 
 | 1038 | static inline bool blk_needs_flush_plug(struct task_struct *tsk) | 
 | 1039 | { | 
 | 1040 | 	struct blk_plug *plug = tsk->plug; | 
 | 1041 |  | 
| NeilBrown | 048c937 | 2011-04-18 09:52:22 +0200 | [diff] [blame] | 1042 | 	return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list)); | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1043 | } | 
 | 1044 |  | 
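/*
 * Plugging sketch (illustrative): batch several submissions under one plug
 * so they can be merged and issued together. "bios" and "n" are
 * hypothetical.
 *
 *	struct blk_plug plug;
 *	int i;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < n; i++)
 *		submit_bio(WRITE, bios[i]);
 *	blk_finish_plug(&plug);
 */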
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1045 | /* | 
 | 1046 |  * Tagged command queuing (TCQ) support | 
 | 1047 |  */ | 
| Jens Axboe | 4aff5e2 | 2006-08-10 08:44:47 +0200 | [diff] [blame] | 1048 | #define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED) | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1049 | extern int blk_queue_start_tag(struct request_queue *, struct request *); | 
 | 1050 | extern struct request *blk_queue_find_tag(struct request_queue *, int); | 
 | 1051 | extern void blk_queue_end_tag(struct request_queue *, struct request *); | 
 | 1052 | extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *); | 
 | 1053 | extern void blk_queue_free_tags(struct request_queue *); | 
 | 1054 | extern int blk_queue_resize_tags(struct request_queue *, int); | 
 | 1055 | extern void blk_queue_invalidate_tags(struct request_queue *); | 
| James Bottomley | 492dfb4 | 2006-08-30 15:48:45 -0400 | [diff] [blame] | 1056 | extern struct blk_queue_tag *blk_init_tags(int); | 
 | 1057 | extern void blk_free_tags(struct blk_queue_tag *); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1058 |  | 
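/*
 * Tagging sketch (illustrative): enable tagging with a chosen depth, then
 * tag requests before issue; blk_queue_start_tag() returns 0 on success
 * and stores the tag in rq->tag.
 *
 *	if (blk_queue_init_tags(q, 64, NULL))
 *		goto fail;
 *	...
 *	if (blk_queue_start_tag(q, rq) == 0)
 *		(issue rq to the hardware using rq->tag)
 */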
| David C Somayajulu | f583f49 | 2006-10-04 08:27:25 +0200 | [diff] [blame] | 1059 | static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, | 
 | 1060 | 						int tag) | 
 | 1061 | { | 
 | 1062 | 	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth)) | 
 | 1063 | 		return NULL; | 
 | 1064 | 	return bqt->tag_index[tag]; | 
 | 1065 | } | 
| Christoph Hellwig | dd3932e | 2010-09-16 20:51:46 +0200 | [diff] [blame] | 1066 |  | 
 | 1067 | #define BLKDEV_DISCARD_SECURE  0x01    /* secure discard */ | 
 | 1068 |  | 
 | 1069 | extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *); | 
| Dmitry Monakhov | fbd9b09 | 2010-04-28 17:55:06 +0400 | [diff] [blame] | 1070 | extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, | 
 | 1071 | 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); | 
| Martin K. Petersen | 4363ac7 | 2012-09-18 12:19:27 -0400 | [diff] [blame] | 1072 | extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, | 
 | 1073 | 		sector_t nr_sects, gfp_t gfp_mask, struct page *page); | 
| Dmitry Monakhov | 3f14d79 | 2010-04-28 17:55:09 +0400 | [diff] [blame] | 1074 | extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, | 
| Christoph Hellwig | dd3932e | 2010-09-16 20:51:46 +0200 | [diff] [blame] | 1075 | 			sector_t nr_sects, gfp_t gfp_mask); | 
| Christoph Hellwig | 2cf6d26 | 2010-08-18 05:29:10 -0400 | [diff] [blame] | 1076 | static inline int sb_issue_discard(struct super_block *sb, sector_t block, | 
 | 1077 | 		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) | 
| David Woodhouse | fb2dce8 | 2008-08-05 18:01:53 +0100 | [diff] [blame] | 1078 | { | 
| Christoph Hellwig | 2cf6d26 | 2010-08-18 05:29:10 -0400 | [diff] [blame] | 1079 | 	return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9), | 
 | 1080 | 				    nr_blocks << (sb->s_blocksize_bits - 9), | 
 | 1081 | 				    gfp_mask, flags); | 
| David Woodhouse | fb2dce8 | 2008-08-05 18:01:53 +0100 | [diff] [blame] | 1082 | } | 
| Lukas Czerner | e6fa0be | 2010-10-27 21:30:04 -0400 | [diff] [blame] | 1083 | static inline int sb_issue_zeroout(struct super_block *sb, sector_t block, | 
| Theodore Ts'o | a107e5a | 2010-10-27 23:44:47 -0400 | [diff] [blame] | 1084 | 		sector_t nr_blocks, gfp_t gfp_mask) | 
| Lukas Czerner | e6fa0be | 2010-10-27 21:30:04 -0400 | [diff] [blame] | 1085 | { | 
 | 1086 | 	return blkdev_issue_zeroout(sb->s_bdev, | 
 | 1087 | 				    block << (sb->s_blocksize_bits - 9), | 
 | 1088 | 				    nr_blocks << (sb->s_blocksize_bits - 9), | 
| Theodore Ts'o | a107e5a | 2010-10-27 23:44:47 -0400 | [diff] [blame] | 1089 | 				    gfp_mask); | 
| Lukas Czerner | e6fa0be | 2010-10-27 21:30:04 -0400 | [diff] [blame] | 1090 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1091 |  | 
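/*
 * Unit note (illustrative): the shifts above convert filesystem blocks to
 * 512-byte sectors. With 4096-byte blocks, s_blocksize_bits == 12, so
 * block N maps to sector N << 3 and nr_blocks becomes nr_blocks << 3
 * sectors.
 */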
| Jens Axboe | 018e044 | 2009-06-26 16:27:10 +0200 | [diff] [blame] | 1092 | extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); | 
| Adel Gadllah | 0b07de8 | 2008-06-26 13:48:27 +0200 | [diff] [blame] | 1093 |  | 
| Martin K. Petersen | eb28d31 | 2010-02-26 00:20:37 -0500 | [diff] [blame] | 1094 | enum blk_default_limits { | 
 | 1095 | 	BLK_MAX_SEGMENTS	= 128, | 
 | 1096 | 	BLK_SAFE_MAX_SECTORS	= 255, | 
 | 1097 | 	BLK_DEF_MAX_SECTORS	= 1024, | 
 | 1098 | 	BLK_MAX_SEGMENT_SIZE	= 65536, | 
 | 1099 | 	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL, | 
 | 1100 | }; | 
| Milan Broz | 0e435ac | 2008-12-03 12:55:08 +0100 | [diff] [blame] | 1101 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1102 | #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) | 
 | 1103 |  | 
| Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 1104 | static inline unsigned long queue_bounce_pfn(struct request_queue *q) | 
 | 1105 | { | 
| Martin K. Petersen | 025146e | 2009-05-22 17:17:51 -0400 | [diff] [blame] | 1106 | 	return q->limits.bounce_pfn; | 
| Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 1107 | } | 
 | 1108 |  | 
 | 1109 | static inline unsigned long queue_segment_boundary(struct request_queue *q) | 
 | 1110 | { | 
| Martin K. Petersen | 025146e | 2009-05-22 17:17:51 -0400 | [diff] [blame] | 1111 | 	return q->limits.seg_boundary_mask; | 
| Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 1112 | } | 
 | 1113 |  | 
 | 1114 | static inline unsigned int queue_max_sectors(struct request_queue *q) | 
 | 1115 | { | 
| Martin K. Petersen | 025146e | 2009-05-22 17:17:51 -0400 | [diff] [blame] | 1116 | 	return q->limits.max_sectors; | 
| Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 1117 | } | 
 | 1118 |  | 
 | 1119 | static inline unsigned int queue_max_hw_sectors(struct request_queue *q) | 
 | 1120 | { | 
| Martin K. Petersen | 025146e | 2009-05-22 17:17:51 -0400 | [diff] [blame] | 1121 | 	return q->limits.max_hw_sectors; | 
| Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 1122 | } | 
 | 1123 |  | 
| Martin K. Petersen | 8a78362 | 2010-02-26 00:20:39 -0500 | [diff] [blame] | 1124 | static inline unsigned short queue_max_segments(struct request_queue *q) | 
| Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 1125 | { | 
| Martin K. Petersen | 8a78362 | 2010-02-26 00:20:39 -0500 | [diff] [blame] | 1126 | 	return q->limits.max_segments; | 
| Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 1127 | } | 
 | 1128 |  | 
 | 1129 | static inline unsigned int queue_max_segment_size(struct request_queue *q) | 
 | 1130 | { | 
| Martin K. Petersen | 025146e | 2009-05-22 17:17:51 -0400 | [diff] [blame] | 1131 | 	return q->limits.max_segment_size; | 
| Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 1132 | } | 
 | 1133 |  | 
| Martin K. Petersen | e1defc4 | 2009-05-22 17:17:49 -0400 | [diff] [blame] | 1134 | static inline unsigned short queue_logical_block_size(struct request_queue *q) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1135 | { | 
 | 1136 | 	int retval = 512; | 
 | 1137 |  | 
| Martin K. Petersen | 025146e | 2009-05-22 17:17:51 -0400 | [diff] [blame] | 1138 | 	if (q && q->limits.logical_block_size) | 
 | 1139 | 		retval = q->limits.logical_block_size; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1140 |  | 
 | 1141 | 	return retval; | 
 | 1142 | } | 
 | 1143 |  | 
| Martin K. Petersen | e1defc4 | 2009-05-22 17:17:49 -0400 | [diff] [blame] | 1144 | static inline unsigned short bdev_logical_block_size(struct block_device *bdev) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1145 | { | 
| Martin K. Petersen | e1defc4 | 2009-05-22 17:17:49 -0400 | [diff] [blame] | 1146 | 	return queue_logical_block_size(bdev_get_queue(bdev)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1147 | } | 
 | 1148 |  | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 1149 | static inline unsigned int queue_physical_block_size(struct request_queue *q) | 
 | 1150 | { | 
 | 1151 | 	return q->limits.physical_block_size; | 
 | 1152 | } | 
 | 1153 |  | 
| Martin K. Petersen | 892b6f9 | 2010-10-13 21:18:03 +0200 | [diff] [blame] | 1154 | static inline unsigned int bdev_physical_block_size(struct block_device *bdev) | 
| Martin K. Petersen | ac481c2 | 2009-10-03 20:52:01 +0200 | [diff] [blame] | 1155 | { | 
 | 1156 | 	return queue_physical_block_size(bdev_get_queue(bdev)); | 
 | 1157 | } | 
 | 1158 |  | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 1159 | static inline unsigned int queue_io_min(struct request_queue *q) | 
 | 1160 | { | 
 | 1161 | 	return q->limits.io_min; | 
 | 1162 | } | 
 | 1163 |  | 
| Martin K. Petersen | ac481c2 | 2009-10-03 20:52:01 +0200 | [diff] [blame] | 1164 | static inline int bdev_io_min(struct block_device *bdev) | 
 | 1165 | { | 
 | 1166 | 	return queue_io_min(bdev_get_queue(bdev)); | 
 | 1167 | } | 
 | 1168 |  | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 1169 | static inline unsigned int queue_io_opt(struct request_queue *q) | 
 | 1170 | { | 
 | 1171 | 	return q->limits.io_opt; | 
 | 1172 | } | 
 | 1173 |  | 
| Martin K. Petersen | ac481c2 | 2009-10-03 20:52:01 +0200 | [diff] [blame] | 1174 | static inline int bdev_io_opt(struct block_device *bdev) | 
 | 1175 | { | 
 | 1176 | 	return queue_io_opt(bdev_get_queue(bdev)); | 
 | 1177 | } | 
 | 1178 |  | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 1179 | static inline int queue_alignment_offset(struct request_queue *q) | 
 | 1180 | { | 
| Martin K. Petersen | ac481c2 | 2009-10-03 20:52:01 +0200 | [diff] [blame] | 1181 | 	if (q->limits.misaligned) | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 1182 | 		return -1; | 
 | 1183 |  | 
| Martin K. Petersen | ac481c2 | 2009-10-03 20:52:01 +0200 | [diff] [blame] | 1184 | 	return q->limits.alignment_offset; | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 1185 | } | 
 | 1186 |  | 
| Martin K. Petersen | e03a72e | 2010-01-11 03:21:51 -0500 | [diff] [blame] | 1187 | static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector) | 
| Martin K. Petersen | 81744ee | 2009-12-29 08:35:35 +0100 | [diff] [blame] | 1188 | { | 
 | 1189 | 	unsigned int granularity = max(lim->physical_block_size, lim->io_min); | 
| Martin K. Petersen | e03a72e | 2010-01-11 03:21:51 -0500 | [diff] [blame] | 1190 | 	unsigned int alignment = (sector << 9) & (granularity - 1); | 
| Martin K. Petersen | 81744ee | 2009-12-29 08:35:35 +0100 | [diff] [blame] | 1191 |  | 
| Martin K. Petersen | e03a72e | 2010-01-11 03:21:51 -0500 | [diff] [blame] | 1192 | 	return (granularity + lim->alignment_offset - alignment) | 
 | 1193 | 		& (granularity - 1); | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 1194 | } | 
 | 1195 |  | 
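/*
 * Worked example (illustrative): with a 4096-byte granularity and a zero
 * alignment_offset, sector 8 (byte offset 4096) is aligned and the result
 * is 0, while sector 7 (byte offset 3584) yields
 * (4096 + 0 - 3584) & 4095 == 512, i.e. 512 bytes short of the next
 * aligned boundary.
 */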
| Martin K. Petersen | ac481c2 | 2009-10-03 20:52:01 +0200 | [diff] [blame] | 1196 | static inline int bdev_alignment_offset(struct block_device *bdev) | 
 | 1197 | { | 
 | 1198 | 	struct request_queue *q = bdev_get_queue(bdev); | 
 | 1199 |  | 
 | 1200 | 	if (q->limits.misaligned) | 
 | 1201 | 		return -1; | 
 | 1202 |  | 
 | 1203 | 	if (bdev != bdev->bd_contains) | 
 | 1204 | 		return bdev->bd_part->alignment_offset; | 
 | 1205 |  | 
 | 1206 | 	return q->limits.alignment_offset; | 
 | 1207 | } | 
 | 1208 |  | 
| Martin K. Petersen | 86b3728 | 2009-11-10 11:50:21 +0100 | [diff] [blame] | 1209 | static inline int queue_discard_alignment(struct request_queue *q) | 
 | 1210 | { | 
 | 1211 | 	if (q->limits.discard_misaligned) | 
 | 1212 | 		return -1; | 
 | 1213 |  | 
 | 1214 | 	return q->limits.discard_alignment; | 
 | 1215 | } | 
 | 1216 |  | 
| Martin K. Petersen | e03a72e | 2010-01-11 03:21:51 -0500 | [diff] [blame] | 1217 | static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector) | 
| Martin K. Petersen | 86b3728 | 2009-11-10 11:50:21 +0100 | [diff] [blame] | 1218 | { | 
| Linus Torvalds | 5977107 | 2012-12-19 07:18:35 -0800 | [diff] [blame] | 1219 | 	unsigned int alignment, granularity, offset; | 
| Martin K. Petersen | dd3d145 | 2010-01-11 03:21:48 -0500 | [diff] [blame] | 1220 |  | 
| Martin K. Petersen | a934a00 | 2011-05-18 10:37:35 +0200 | [diff] [blame] | 1221 | 	if (!lim->max_discard_sectors) | 
 | 1222 | 		return 0; | 
 | 1223 |  | 
| Linus Torvalds | 5977107 | 2012-12-19 07:18:35 -0800 | [diff] [blame] | 1224 | 	/* Why are these in bytes, not sectors? */ | 
 | 1225 | 	alignment = lim->discard_alignment >> 9; | 
 | 1226 | 	granularity = lim->discard_granularity >> 9; | 
 | 1227 | 	if (!granularity) | 
 | 1228 | 		return 0; | 
 | 1229 |  | 
 | 1230 | 	/* Offset of the partition start in 'granularity' sectors */ | 
 | 1231 | 	offset = sector_div(sector, granularity); | 
 | 1232 |  | 
 | 1233 | 	/* And why do we do this modulus *again* in blkdev_issue_discard()? */ | 
 | 1234 | 	offset = (granularity + alignment - offset) % granularity; | 
 | 1235 |  | 
 | 1236 | 	/* Turn it back into bytes, gaah */ | 
 | 1237 | 	return offset << 9; | 
| Martin K. Petersen | 86b3728 | 2009-11-10 11:50:21 +0100 | [diff] [blame] | 1238 | } | 
 | 1239 |  | 
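/*
 * Worked example (illustrative): with a 1 MiB discard granularity
 * (2048 sectors) and a zero discard_alignment, sector 1024 sits halfway
 * into a granule: (2048 + 0 - 1024) % 2048 == 1024 sectors, returned as
 * 1024 << 9 == 524288 bytes.
 */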
| Paolo Bonzini | c6e6663 | 2012-08-02 09:48:50 +0200 | [diff] [blame] | 1240 | static inline int bdev_discard_alignment(struct block_device *bdev) | 
 | 1241 | { | 
 | 1242 | 	struct request_queue *q = bdev_get_queue(bdev); | 
 | 1243 |  | 
 | 1244 | 	if (bdev != bdev->bd_contains) | 
 | 1245 | 		return bdev->bd_part->discard_alignment; | 
 | 1246 |  | 
 | 1247 | 	return q->limits.discard_alignment; | 
 | 1248 | } | 
 | 1249 |  | 
| Martin K. Petersen | 98262f2 | 2009-12-03 09:24:48 +0100 | [diff] [blame] | 1250 | static inline unsigned int queue_discard_zeroes_data(struct request_queue *q) | 
 | 1251 | { | 
| Martin K. Petersen | a934a00 | 2011-05-18 10:37:35 +0200 | [diff] [blame] | 1252 | 	if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1) | 
| Martin K. Petersen | 98262f2 | 2009-12-03 09:24:48 +0100 | [diff] [blame] | 1253 | 		return 1; | 
 | 1254 |  | 
 | 1255 | 	return 0; | 
 | 1256 | } | 
 | 1257 |  | 
 | 1258 | static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev) | 
 | 1259 | { | 
 | 1260 | 	return queue_discard_zeroes_data(bdev_get_queue(bdev)); | 
 | 1261 | } | 
 | 1262 |  | 
| Martin K. Petersen | 4363ac7 | 2012-09-18 12:19:27 -0400 | [diff] [blame] | 1263 | static inline unsigned int bdev_write_same(struct block_device *bdev) | 
 | 1264 | { | 
 | 1265 | 	struct request_queue *q = bdev_get_queue(bdev); | 
 | 1266 |  | 
 | 1267 | 	if (q) | 
 | 1268 | 		return q->limits.max_write_same_sectors; | 
 | 1269 |  | 
 | 1270 | 	return 0; | 
 | 1271 | } | 
 | 1272 |  | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1273 | static inline int queue_dma_alignment(struct request_queue *q) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1274 | { | 
| Pete Wyckoff | 482eb68 | 2008-01-01 10:23:02 -0500 | [diff] [blame] | 1275 | 	return q ? q->dma_alignment : 511; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1276 | } | 
 | 1277 |  | 
| Namhyung Kim | 1441779 | 2010-09-15 13:08:27 +0200 | [diff] [blame] | 1278 | static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, | 
| FUJITA Tomonori | 8790407 | 2008-08-28 15:05:58 +0900 | [diff] [blame] | 1279 | 				 unsigned int len) | 
 | 1280 | { | 
 | 1281 | 	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; | 
| Namhyung Kim | 1441779 | 2010-09-15 13:08:27 +0200 | [diff] [blame] | 1282 | 	return !(addr & alignment) && !(len & alignment); | 
| FUJITA Tomonori | 8790407 | 2008-08-28 15:05:58 +0900 | [diff] [blame] | 1283 | } | 
 | 1284 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1285 | /* assumes size > 256 */ | 
 | 1286 | static inline unsigned int blksize_bits(unsigned int size) | 
 | 1287 | { | 
 | 1288 | 	unsigned int bits = 8; | 
 | 1289 | 	do { | 
 | 1290 | 		bits++; | 
 | 1291 | 		size >>= 1; | 
 | 1292 | 	} while (size > 256); | 
 | 1293 | 	return bits; | 
 | 1294 | } | 
 | 1295 |  | 
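/*
 * Worked example (illustrative): blksize_bits(512) == 9 and
 * blksize_bits(4096) == 12, i.e. log2 of the block size.
 */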
| Adrian Bunk | 2befb9e | 2005-09-10 00:27:17 -0700 | [diff] [blame] | 1296 | static inline unsigned int block_size(struct block_device *bdev) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1297 | { | 
 | 1298 | 	return bdev->bd_block_size; | 
 | 1299 | } | 
 | 1300 |  | 
| shaohua.li@intel.com | f387693 | 2011-05-06 11:34:32 -0600 | [diff] [blame] | 1301 | static inline bool queue_flush_queueable(struct request_queue *q) | 
 | 1302 | { | 
 | 1303 | 	return !q->flush_not_queueable; | 
 | 1304 | } | 
 | 1305 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1306 | typedef struct {struct page *v;} Sector; | 
 | 1307 |  | 
 | 1308 | unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *); | 
 | 1309 |  | 
 | 1310 | static inline void put_dev_sector(Sector p) | 
 | 1311 | { | 
 | 1312 | 	page_cache_release(p.v); | 
 | 1313 | } | 
 | 1314 |  | 
 | 1315 | struct work_struct; | 
| Jens Axboe | 18887ad | 2008-07-28 13:08:45 +0200 | [diff] [blame] | 1316 | int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1317 |  | 
| Divyesh Shah | 9195291 | 2010-04-01 15:01:41 -0700 | [diff] [blame] | 1318 | #ifdef CONFIG_BLK_CGROUP | 
| Jens Axboe | 28f4197 | 2010-06-01 12:23:18 +0200 | [diff] [blame] | 1319 | /* | 
 | 1320 |  * This should not be using sched_clock(). A real patch is in progress | 
 | 1321 |  * to fix this up; until that is in place we need to disable preemption | 
 | 1322 |  * around sched_clock() in this function and set_io_start_time_ns(). | 
 | 1323 |  */ | 
| Divyesh Shah | 9195291 | 2010-04-01 15:01:41 -0700 | [diff] [blame] | 1324 | static inline void set_start_time_ns(struct request *req) | 
 | 1325 | { | 
| Jens Axboe | 28f4197 | 2010-06-01 12:23:18 +0200 | [diff] [blame] | 1326 | 	preempt_disable(); | 
| Divyesh Shah | 9195291 | 2010-04-01 15:01:41 -0700 | [diff] [blame] | 1327 | 	req->start_time_ns = sched_clock(); | 
| Jens Axboe | 28f4197 | 2010-06-01 12:23:18 +0200 | [diff] [blame] | 1328 | 	preempt_enable(); | 
| Divyesh Shah | 9195291 | 2010-04-01 15:01:41 -0700 | [diff] [blame] | 1329 | } | 
 | 1330 |  | 
 | 1331 | static inline void set_io_start_time_ns(struct request *req) | 
 | 1332 | { | 
| Jens Axboe | 28f4197 | 2010-06-01 12:23:18 +0200 | [diff] [blame] | 1333 | 	preempt_disable(); | 
| Divyesh Shah | 9195291 | 2010-04-01 15:01:41 -0700 | [diff] [blame] | 1334 | 	req->io_start_time_ns = sched_clock(); | 
| Jens Axboe | 28f4197 | 2010-06-01 12:23:18 +0200 | [diff] [blame] | 1335 | 	preempt_enable(); | 
| Divyesh Shah | 9195291 | 2010-04-01 15:01:41 -0700 | [diff] [blame] | 1336 | } | 
| Divyesh Shah | 84c124d | 2010-04-09 08:31:19 +0200 | [diff] [blame] | 1337 |  | 
 | 1338 | static inline uint64_t rq_start_time_ns(struct request *req) | 
 | 1339 | { | 
 | 1340 | 	return req->start_time_ns; | 
 | 1341 | } | 
 | 1342 |  | 
 | 1343 | static inline uint64_t rq_io_start_time_ns(struct request *req) | 
 | 1344 | { | 
 | 1345 | 	return req->io_start_time_ns; | 
 | 1346 | } | 
| Divyesh Shah | 9195291 | 2010-04-01 15:01:41 -0700 | [diff] [blame] | 1347 | #else | 
 | 1348 | static inline void set_start_time_ns(struct request *req) {} | 
 | 1349 | static inline void set_io_start_time_ns(struct request *req) {} | 
| Divyesh Shah | 84c124d | 2010-04-09 08:31:19 +0200 | [diff] [blame] | 1350 | static inline uint64_t rq_start_time_ns(struct request *req) | 
 | 1351 | { | 
 | 1352 | 	return 0; | 
 | 1353 | } | 
 | 1354 | static inline uint64_t rq_io_start_time_ns(struct request *req) | 
 | 1355 | { | 
 | 1356 | 	return 0; | 
 | 1357 | } | 
| Divyesh Shah | 9195291 | 2010-04-01 15:01:41 -0700 | [diff] [blame] | 1358 | #endif | 
 | 1359 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1360 | #define MODULE_ALIAS_BLOCKDEV(major,minor) \ | 
 | 1361 | 	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) | 
 | 1362 | #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ | 
 | 1363 | 	MODULE_ALIAS("block-major-" __stringify(major) "-*") | 
 | 1364 |  | 
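/*
 * Usage sketch (illustrative): a driver owning block major 2 would declare
 *
 *	MODULE_ALIAS_BLOCKDEV_MAJOR(2);
 *
 * which expands to MODULE_ALIAS("block-major-2-*") and lets the module be
 * loaded on demand for that major.
 */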
| Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1365 | #if defined(CONFIG_BLK_DEV_INTEGRITY) | 
 | 1366 |  | 
| Jens Axboe | b24498d | 2008-06-27 09:12:09 +0200 | [diff] [blame] | 1367 | #define INTEGRITY_FLAG_READ	2	/* verify data integrity on read */ | 
 | 1368 | #define INTEGRITY_FLAG_WRITE	4	/* generate data integrity on write */ | 
| Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1369 |  | 
 | 1370 | struct blk_integrity_exchg { | 
 | 1371 | 	void			*prot_buf; | 
 | 1372 | 	void			*data_buf; | 
 | 1373 | 	sector_t		sector; | 
 | 1374 | 	unsigned int		data_size; | 
 | 1375 | 	unsigned short		sector_size; | 
 | 1376 | 	const char		*disk_name; | 
 | 1377 | }; | 
 | 1378 |  | 
 | 1379 | typedef void (integrity_gen_fn) (struct blk_integrity_exchg *); | 
 | 1380 | typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *); | 
 | 1381 | typedef void (integrity_set_tag_fn) (void *, void *, unsigned int); | 
 | 1382 | typedef void (integrity_get_tag_fn) (void *, void *, unsigned int); | 
 | 1383 |  | 
 | 1384 | struct blk_integrity { | 
 | 1385 | 	integrity_gen_fn	*generate_fn; | 
 | 1386 | 	integrity_vrfy_fn	*verify_fn; | 
 | 1387 | 	integrity_set_tag_fn	*set_tag_fn; | 
 | 1388 | 	integrity_get_tag_fn	*get_tag_fn; | 
 | 1389 |  | 
 | 1390 | 	unsigned short		flags; | 
 | 1391 | 	unsigned short		tuple_size; | 
 | 1392 | 	unsigned short		sector_size; | 
 | 1393 | 	unsigned short		tag_size; | 
 | 1394 |  | 
 | 1395 | 	const char		*name; | 
 | 1396 |  | 
 | 1397 | 	struct kobject		kobj; | 
 | 1398 | }; | 
 | 1399 |  | 
| Mike Snitzer | a63a5cf | 2011-04-01 21:02:31 +0200 | [diff] [blame] | 1400 | extern bool blk_integrity_is_initialized(struct gendisk *); | 
| Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1401 | extern int blk_integrity_register(struct gendisk *, struct blk_integrity *); | 
 | 1402 | extern void blk_integrity_unregister(struct gendisk *); | 
| Martin K. Petersen | ad7fce9 | 2008-10-01 03:38:39 -0400 | [diff] [blame] | 1403 | extern int blk_integrity_compare(struct gendisk *, struct gendisk *); | 
| Martin K. Petersen | 13f05c8 | 2010-09-10 20:50:10 +0200 | [diff] [blame] | 1404 | extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, | 
 | 1405 | 				   struct scatterlist *); | 
 | 1406 | extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); | 
 | 1407 | extern int blk_integrity_merge_rq(struct request_queue *, struct request *, | 
 | 1408 | 				  struct request *); | 
 | 1409 | extern int blk_integrity_merge_bio(struct request_queue *, struct request *, | 
 | 1410 | 				   struct bio *); | 
| Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1411 |  | 
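/*
 * Registration sketch (illustrative, assuming a DIF Type 1-style profile):
 * "my_generate" and "my_verify" are hypothetical callbacks matching
 * integrity_gen_fn and integrity_vrfy_fn above.
 *
 *	static struct blk_integrity my_integrity = {
 *		.name		= "MY-DIF-TYPE1",
 *		.generate_fn	= my_generate,
 *		.verify_fn	= my_verify,
 *		.tuple_size	= 8,
 *		.tag_size	= 0,
 *	};
 *
 *	blk_integrity_register(disk, &my_integrity);
 */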
| Jens Axboe | b04accc | 2008-10-02 12:53:22 +0200 | [diff] [blame] | 1412 | static inline | 
 | 1413 | struct blk_integrity *bdev_get_integrity(struct block_device *bdev) | 
 | 1414 | { | 
 | 1415 | 	return bdev->bd_disk->integrity; | 
 | 1416 | } | 
 | 1417 |  | 
| Martin K. Petersen | b02739b | 2008-10-02 18:47:49 +0200 | [diff] [blame] | 1418 | static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) | 
 | 1419 | { | 
 | 1420 | 	return disk->integrity; | 
 | 1421 | } | 
 | 1422 |  | 
| Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1423 | static inline int blk_integrity_rq(struct request *rq) | 
 | 1424 | { | 
| Martin K. Petersen | d442cc4 | 2008-07-16 16:09:06 -0400 | [diff] [blame] | 1425 | 	if (rq->bio == NULL) | 
 | 1426 | 		return 0; | 
 | 1427 |  | 
| Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1428 | 	return bio_integrity(rq->bio); | 
 | 1429 | } | 
 | 1430 |  | 
| Martin K. Petersen | 13f05c8 | 2010-09-10 20:50:10 +0200 | [diff] [blame] | 1431 | static inline void blk_queue_max_integrity_segments(struct request_queue *q, | 
 | 1432 | 						    unsigned int segs) | 
 | 1433 | { | 
 | 1434 | 	q->limits.max_integrity_segments = segs; | 
 | 1435 | } | 
 | 1436 |  | 
 | 1437 | static inline unsigned short | 
 | 1438 | queue_max_integrity_segments(struct request_queue *q) | 
 | 1439 | { | 
 | 1440 | 	return q->limits.max_integrity_segments; | 
 | 1441 | } | 
 | 1442 |  | 
| Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1443 | #else /* CONFIG_BLK_DEV_INTEGRITY */ | 
 | 1444 |  | 
| Stephen Rothwell | fd83240 | 2012-01-12 09:17:30 +0100 | [diff] [blame] | 1445 | struct bio; | 
 | 1446 | struct block_device; | 
 | 1447 | struct gendisk; | 
 | 1448 | struct blk_integrity; | 
 | 1449 |  | 
 | 1450 | static inline int blk_integrity_rq(struct request *rq) | 
 | 1451 | { | 
 | 1452 | 	return 0; | 
 | 1453 | } | 
 | 1454 | static inline int blk_rq_count_integrity_sg(struct request_queue *q, | 
 | 1455 | 					    struct bio *b) | 
 | 1456 | { | 
 | 1457 | 	return 0; | 
 | 1458 | } | 
 | 1459 | static inline int blk_rq_map_integrity_sg(struct request_queue *q, | 
 | 1460 | 					  struct bio *b, | 
 | 1461 | 					  struct scatterlist *s) | 
 | 1462 | { | 
 | 1463 | 	return 0; | 
 | 1464 | } | 
 | 1465 | static inline struct blk_integrity *bdev_get_integrity(struct block_device *b) | 
 | 1466 | { | 
 | 1467 | 	return NULL; | 
 | 1468 | } | 
 | 1469 | static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) | 
 | 1470 | { | 
 | 1471 | 	return NULL; | 
 | 1472 | } | 
 | 1473 | static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b) | 
 | 1474 | { | 
 | 1475 | 	return 0; | 
 | 1476 | } | 
 | 1477 | static inline int blk_integrity_register(struct gendisk *d, | 
 | 1478 | 					 struct blk_integrity *b) | 
 | 1479 | { | 
 | 1480 | 	return 0; | 
 | 1481 | } | 
 | 1482 | static inline void blk_integrity_unregister(struct gendisk *d) | 
 | 1483 | { | 
 | 1484 | } | 
 | 1485 | static inline void blk_queue_max_integrity_segments(struct request_queue *q, | 
 | 1486 | 						    unsigned int segs) | 
 | 1487 | { | 
 | 1488 | } | 
 | 1489 | static inline unsigned short queue_max_integrity_segments(struct request_queue *q) | 
 | 1490 | { | 
 | 1491 | 	return 0; | 
 | 1492 | } | 
 | 1493 | static inline int blk_integrity_merge_rq(struct request_queue *rq, | 
 | 1494 | 					 struct request *r1, | 
 | 1495 | 					 struct request *r2) | 
 | 1496 | { | 
 | 1497 | 	return 0; | 
 | 1498 | } | 
 | 1499 | static inline int blk_integrity_merge_bio(struct request_queue *rq, | 
 | 1500 | 					  struct request *r, | 
 | 1501 | 					  struct bio *b) | 
 | 1502 | { | 
 | 1503 | 	return 0; | 
 | 1504 | } | 
 | 1505 | static inline bool blk_integrity_is_initialized(struct gendisk *g) | 
 | 1506 | { | 
 | 1507 | 	return false; | 
 | 1508 | } | 
| Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1509 |  | 
 | 1510 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ | 
 | 1511 |  | 
| Al Viro | 08f8585 | 2007-10-08 13:26:20 -0400 | [diff] [blame] | 1512 | struct block_device_operations { | 
| Al Viro | d4430d6 | 2008-03-02 09:09:22 -0500 | [diff] [blame] | 1513 | 	int (*open) (struct block_device *, fmode_t); | 
| Al Viro | db2a144 | 2013-05-05 21:52:57 -0400 | [diff] [blame] | 1514 | 	void (*release) (struct gendisk *, fmode_t); | 
| Al Viro | d4430d6 | 2008-03-02 09:09:22 -0500 | [diff] [blame] | 1515 | 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); | 
 | 1516 | 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); | 
| Al Viro | 08f8585 | 2007-10-08 13:26:20 -0400 | [diff] [blame] | 1517 | 	int (*direct_access) (struct block_device *, sector_t, | 
 | 1518 | 						void **, unsigned long *); | 
| Tejun Heo | 77ea887 | 2010-12-08 20:57:37 +0100 | [diff] [blame] | 1519 | 	unsigned int (*check_events) (struct gendisk *disk, | 
 | 1520 | 				      unsigned int clearing); | 
 | 1521 | 	/* ->media_changed() is DEPRECATED, use ->check_events() instead */ | 
| Al Viro | 08f8585 | 2007-10-08 13:26:20 -0400 | [diff] [blame] | 1522 | 	int (*media_changed) (struct gendisk *); | 
| Tejun Heo | c3e33e0 | 2010-05-15 20:09:29 +0200 | [diff] [blame] | 1523 | 	void (*unlock_native_capacity) (struct gendisk *); | 
| Al Viro | 08f8585 | 2007-10-08 13:26:20 -0400 | [diff] [blame] | 1524 | 	int (*revalidate_disk) (struct gendisk *); | 
 | 1525 | 	int (*getgeo)(struct block_device *, struct hd_geometry *); | 
| Nitin Gupta | b3a27d0 | 2010-05-17 11:02:43 +0530 | [diff] [blame] | 1526 | 	/* this callback is with swap_lock and sometimes page table lock held */ | 
 | 1527 | 	void (*swap_slot_free_notify) (struct block_device *, unsigned long); | 
| Al Viro | 08f8585 | 2007-10-08 13:26:20 -0400 | [diff] [blame] | 1528 | 	struct module *owner; | 
 | 1529 | }; | 
 | 1530 |  | 
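/*
 * Definition sketch (illustrative): a simple driver usually fills in only a
 * few hooks. "my_open", "my_release", "my_ioctl" and "my_getgeo" are
 * hypothetical.
 *
 *	static const struct block_device_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= my_open,
 *		.release	= my_release,
 *		.ioctl		= my_ioctl,
 *		.getgeo		= my_getgeo,
 *	};
 */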
| Al Viro | 633a08b | 2007-08-29 20:34:12 -0400 | [diff] [blame] | 1531 | extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, | 
 | 1532 | 				 unsigned long); | 
| David Howells | 9361401 | 2006-09-30 20:45:40 +0200 | [diff] [blame] | 1533 | #else /* CONFIG_BLOCK */ | 
 | 1534 | /* | 
 | 1535 |  * stubs for when the block layer is configured out | 
 | 1536 |  */ | 
 | 1537 | #define buffer_heads_over_limit 0 | 
 | 1538 |  | 
| David Howells | 9361401 | 2006-09-30 20:45:40 +0200 | [diff] [blame] | 1539 | static inline long nr_blockdev_pages(void) | 
 | 1540 | { | 
 | 1541 | 	return 0; | 
 | 1542 | } | 
 | 1543 |  | 
| Jens Axboe | 1f940bd | 2011-03-11 20:17:08 +0100 | [diff] [blame] | 1544 | struct blk_plug { | 
 | 1545 | }; | 
 | 1546 |  | 
 | 1547 | static inline void blk_start_plug(struct blk_plug *plug) | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1548 | { | 
 | 1549 | } | 
 | 1550 |  | 
| Jens Axboe | 1f940bd | 2011-03-11 20:17:08 +0100 | [diff] [blame] | 1551 | static inline void blk_finish_plug(struct blk_plug *plug) | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1552 | { | 
 | 1553 | } | 
 | 1554 |  | 
| Jens Axboe | 1f940bd | 2011-03-11 20:17:08 +0100 | [diff] [blame] | 1555 | static inline void blk_flush_plug(struct task_struct *task) | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1556 | { | 
 | 1557 | } | 
 | 1558 |  | 
| Jens Axboe | a237c1c | 2011-04-16 13:27:55 +0200 | [diff] [blame] | 1559 | static inline void blk_schedule_flush_plug(struct task_struct *task) | 
 | 1560 | { | 
 | 1561 | } | 
 | 1562 |  | 
 | 1563 |  | 
| Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1564 | static inline bool blk_needs_flush_plug(struct task_struct *tsk) | 
 | 1565 | { | 
 | 1566 | 	return false; | 
 | 1567 | } | 
 | 1568 |  | 
| David Howells | 9361401 | 2006-09-30 20:45:40 +0200 | [diff] [blame] | 1569 | #endif /* CONFIG_BLOCK */ | 
 | 1570 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1571 | #endif |