#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#ifdef CONFIG_BLOCK

#include <linux/sched.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>

#include <asm/scatterlist.h>

struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

struct request_list {
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int count[2];
	int starved[2];
	int elvpriv;
	mempool_t *rq_pool;
	wait_queue_head_t wait[2];
};
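
/*
 * Illustrative sketch, not part of the original header: callers index the
 * per-direction fields with BLK_RW_SYNC/BLK_RW_ASYNC, roughly
 *
 *	struct request_list *rl = &q->rq;
 *
 *	if (rl->count[BLK_RW_SYNC] + 1 >= q->nr_requests)
 *		blk_set_queue_full(q, BLK_RW_SYNC);
 *
 * blk_set_queue_full() and blk_queue_full() are defined later in this file.
 */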

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_SENSE,			/* sense request */
	REQ_TYPE_PM_SUSPEND,		/* suspend request */
	REQ_TYPE_PM_RESUME,		/* resume request */
	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
	REQ_TYPE_SPECIAL,		/* driver defined type */
	/*
	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
	 * private REQ_LB opcodes to differentiate what type of request this is
	 */
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};

#define BLK_MAX_CDB	16

/*
 * try to put the fields that are referenced together in the same cacheline.
 * if you modify this structure, be sure to check block/blk-core.c:blk_rq_init()
 * as well!
 */
struct request {
	struct list_head queuelist;
	struct call_single_data csd;

	struct request_queue *q;

	unsigned int cmd_flags;
	enum rq_cmd_type_bits cmd_type;
	unsigned long atomic_flags;

	int cpu;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	struct hlist_node hash;	/* merge hash */
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.
	 */
	void *elevator_private;
	void *elevator_private2;
	void *elevator_private3;

	struct gendisk *rq_disk;
	unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;    /* when passed to hardware */
#endif
	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	int ref_count;

	void *special;		/* opaque pointer available for LLD use */
	char *buffer;		/* kaddr of the current segment if available */

	int tag;
	int errors;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;
	unsigned short cmd_len;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests. Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int	pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32	pm_state;
	void*	data;		/* for driver use */
};

#include <linux/elevator.h>

typedef void (request_fn_proc) (struct request_queue *q);
typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
typedef void (unplug_fn) (struct request_queue *);

struct bio_vec;
struct bvec_merge_data {
	struct block_device *bi_bdev;
	sector_t bi_sector;
	unsigned bi_size;
	unsigned long bi_rw;
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
			     struct bio_vec *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
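
/*
 * Illustrative sketch, not part of the original header; mydrv_timed_out()
 * and mydrv_still_busy() are hypothetical names.  A driver's rq_timed_out_fn
 * tells the block layer what to do with a timed-out request, roughly
 *
 *	static enum blk_eh_timer_return mydrv_timed_out(struct request *rq)
 *	{
 *		if (mydrv_still_busy(rq))
 *			return BLK_EH_RESET_TIMER;
 *		return BLK_EH_NOT_HANDLED;
 *	}
 *
 * The handler is installed with blk_queue_rq_timed_out().
 */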

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;

	unsigned short		logical_block_size;
	unsigned short		max_segments;
	unsigned short		max_integrity_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		cluster;
	signed char		discard_zeroes_data;
};
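
/*
 * Illustrative sketch, not part of the original header: drivers normally
 * fill in queue_limits through the blk_queue_*() helpers declared near the
 * end of this file instead of writing the fields directly; the numbers
 * below are made-up examples.
 *
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);
 *	blk_queue_max_hw_sectors(q, 255);
 *	blk_queue_max_segments(q, 128);
 */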

struct request_queue
{
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	struct elevator_queue	*elevator;

	/*
	 * the queue request freelist, one for reads and one for writes
	 */
	struct request_list	rq;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unprep_rq_fn		*unprep_rq_fn;
	unplug_fn		*unplug_fn;
	merge_bvec_fn		*merge_bvec_fn;
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Auto-unplugging state
	 */
	struct timer_list	unplug_timer;
	int			unplug_thresh;	/* After this many requests */
	unsigned long		unplug_delay;	/* After this many jiffies */
	struct work_struct	unplug_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	void			*dma_drain_buffer;
	unsigned int		dma_drain_size;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight[2];

	unsigned int		rq_timeout;
	struct timer_list	timeout;
	struct list_head	timeout_list;

	struct queue_limits	limits;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	unsigned int		flush_flags;
	unsigned int		flush_seq;
	int			flush_err;
	struct request		flush_rq;
	struct request		*orig_flush_rq;
	struct list_head	pending_flushes;

	struct mutex		sysfs_lock;

#if defined(CONFIG_BLK_DEV_BSG)
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
};

#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define	QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
#define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
#define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
#define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES    10	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP   11	/* force complete on same CPU */
#define QUEUE_FLAG_FAIL_IO     12	/* fake timeout */
#define QUEUE_FLAG_STACKABLE   13	/* supports request stacking */
#define QUEUE_FLAG_NONROT      14	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT     15	/* do IO stats */
#define QUEUE_FLAG_DISCARD     16	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES   17	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM  18	/* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD  19	/* supports SECDISCARD */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

static inline int queue_is_locked(struct request_queue *q)
{
#ifdef CONFIG_SMP
	spinlock_t *lock = q->queue_lock;
	return lock && spin_is_locked(lock);
#else
	return 1;
#endif
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
	__clear_bit(flag, &q->queue_flags);
}
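
/*
 * Illustrative sketch, not part of the original header: queue_flag_set()
 * and queue_flag_clear() assume the caller holds queue_lock (hence the
 * WARN_ON_ONCE() above), e.g.
 *
 *	spin_lock_irqsave(q->queue_lock, flags);
 *	queue_flag_set(QUEUE_FLAG_STOPPED, q);
 *	spin_unlock_irqrestore(q->queue_lock, flags);
 *
 * The *_unlocked variants are for paths where the queue is not yet (or no
 * longer) visible to others, e.g.
 * queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q) during device setup.
 */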

#define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secdiscard(q)	(blk_queue_discard(q) && \
	test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
	(((rq)->cmd_flags & REQ_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS || \
	  ((rq)->cmd_flags & REQ_DISCARD)))

#define blk_pm_request(rq)	\
	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
	 (rq)->cmd_type == REQ_TYPE_PM_RESUME)

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((rq)->cmd_flags & 1)

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

/*
 * We regard a request as sync if it is either a read or a sync write
 */
static inline bool rw_is_sync(unsigned int rw_flags)
{
	return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(rq->cmd_flags);
}

static inline int blk_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
	return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
	else
		queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
}

static inline void blk_clear_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
	else
		queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
}

/*
 * a mergeable request must not have the _NOMERGE or _BARRIER bits set,
 * nor may it already have been started by the driver.
 */
#define RQ_NOMERGE_FLAGS	\
	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
#define rq_mergeable(rq)	\
	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
	 (((rq)->cmd_flags & REQ_DISCARD) || \
	  (rq)->cmd_type == REQ_TYPE_FS))

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	int i;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.i)

#define rq_iter_last(rq, _iter)					\
		(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
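
/*
 * Illustrative sketch, not part of the original header: walking every
 * segment of a request (mydrv_xfer() is a hypothetical helper; highmem
 * pages would need kmap() instead of page_address()):
 *
 *	struct req_iterator iter;
 *	struct bio_vec *bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter) {
 *		void *addr = page_address(bvec->bv_page) + bvec->bv_offset;
 *		mydrv_xfer(addr, bvec->bv_len);
 *	}
 */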

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern struct request *blk_make_request(struct request_queue *, struct bio *,
					gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
		unsigned int len);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern void blk_plug_device(struct request_queue *);
extern void blk_plug_device_unlocked(struct request_queue *);
extern int blk_remove_plug(struct request_queue *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

/*
 * A queue has just exited congestion.  Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
{
	clear_bdi_congested(&q->backing_dev_info, sync);
}

/*
 * A queue has just entered congestion.  Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(struct request_queue *q, int sync)
{
	set_bdi_congested(&q->backing_dev_info, sync);
}

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *);
extern void blk_run_queue(struct request_queue *);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, struct sg_iovec *, int,
			       unsigned int, gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
extern void blk_unplug(struct request_queue *q);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}
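
/*
 * Illustrative sketch, not part of the original header: a driver programs
 * its hardware from the accessors above rather than from __sector and
 * __data_len, roughly
 *
 *	sector_t start = blk_rq_pos(rq);
 *	unsigned int nsect = blk_rq_sectors(rq);
 *	unsigned int cur = blk_rq_cur_sectors(rq);
 *
 * The accessors remain correct as the request is partially completed,
 * which is why direct access to the underlying fields is forbidden.
 */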

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);
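
/*
 * Illustrative sketch, not part of the original header; mydrv_request()
 * and mydrv_issue() are hypothetical names.  The typical request_fn loop
 * built on the helpers above (blk_fetch_request() is blk_peek_request()
 * plus blk_start_request()):
 *
 *	static void mydrv_request(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL)
 *			mydrv_issue(rq);
 *	}
 */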

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends.  __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);
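
/*
 * Illustrative sketch, not part of the original header; mydrv_done() is a
 * hypothetical name.  Completing a request from a context that already
 * holds queue_lock, roughly
 *
 *	if (__blk_end_request(rq, error, nr_bytes))
 *		return;		a true return means bytes are still pending
 *	mydrv_done(rq);
 *
 * The blk_end_request() variants take queue_lock themselves, so they must
 * be used when the lock is not held.
 */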

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_abort_queue(struct request_queue *);
extern void blk_unprep_request(struct request *);

/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *,
							   request_fn_proc *,
							   spinlock_t *, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
						      request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
| Martin K. Petersen | 17be8c2 | 2010-01-11 03:21:49 -0500 | [diff] [blame] | 828 | extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, | 
|  | 829 | sector_t offset); | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 830 | extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, | 
|  | 831 | sector_t offset); | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 832 | extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); | 
| Tejun Heo | e3790c7 | 2008-03-04 11:18:17 +0100 | [diff] [blame] | 833 | extern void blk_queue_dma_pad(struct request_queue *, unsigned int); | 
| FUJITA Tomonori | 27f8221 | 2008-07-04 09:30:03 +0200 | [diff] [blame] | 834 | extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); | 
| Tejun Heo | 2fb98e8 | 2008-02-19 11:36:53 +0100 | [diff] [blame] | 835 | extern int blk_queue_dma_drain(struct request_queue *q, | 
|  | 836 | dma_drain_needed_fn *dma_drain_needed, | 
|  | 837 | void *buf, unsigned int size); | 
| Kiyoshi Ueda | ef9e3fa | 2008-10-01 16:12:15 +0200 | [diff] [blame] | 838 | extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn); | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 839 | extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); | 
|  | 840 | extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); | 
| James Bottomley | 28018c2 | 2010-07-01 19:49:17 +0900 | [diff] [blame] | 841 | extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn); | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 842 | extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *); | 
|  | 843 | extern void blk_queue_dma_alignment(struct request_queue *, int); | 
| James Bottomley | 11c3e68 | 2007-12-31 16:37:00 -0600 | [diff] [blame] | 844 | extern void blk_queue_update_dma_alignment(struct request_queue *, int); | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 845 | extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); | 
| Jens Axboe | 242f9dc | 2008-09-14 05:55:09 -0700 | [diff] [blame] | 846 | extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); | 
|  | 847 | extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); | 
| Tejun Heo | 4913efe | 2010-09-03 11:56:16 +0200 | [diff] [blame] | 848 | extern void blk_queue_flush(struct request_queue *q, unsigned int flush); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 849 | extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); | 
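/*
 * Illustrative sketch (not part of this header): bringing up a request
 * queue and declaring device limits at probe time.  The example_dev
 * type, lock, and limit values are hypothetical.
 */
#if 0
static int example_init_queue(struct example_dev *dev)
{
	dev->queue = blk_init_queue(example_request_fn, &dev->lock);
	if (!dev->queue)
		return -ENOMEM;

	/* Advertise what the hardware can actually do. */
	blk_queue_logical_block_size(dev->queue, 512);
	blk_queue_max_hw_sectors(dev->queue, 1024);
	blk_queue_max_segments(dev->queue, BLK_MAX_SEGMENTS);
	blk_queue_max_segment_size(dev->queue, BLK_MAX_SEGMENT_SIZE);
	return 0;
}
#endif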
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 850 |  | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 851 | extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 852 | extern void blk_dump_rq_flags(struct request *, char *); | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 853 | extern void generic_unplug_device(struct request_queue *); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 854 | extern long nr_blockdev_pages(void); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 855 |  | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 856 | int blk_get_queue(struct request_queue *); | 
|  | 857 | struct request_queue *blk_alloc_queue(gfp_t); | 
|  | 858 | struct request_queue *blk_alloc_queue_node(gfp_t, int); | 
|  | 859 | extern void blk_put_queue(struct request_queue *); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 860 |  | 
|  | 861 | /* | 
|  | 862 | * tag stuff | 
|  | 863 | */ | 
| Jens Axboe | 4aff5e2 | 2006-08-10 08:44:47 +0200 | [diff] [blame] | 864 | #define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED) | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 865 | extern int blk_queue_start_tag(struct request_queue *, struct request *); | 
|  | 866 | extern struct request *blk_queue_find_tag(struct request_queue *, int); | 
|  | 867 | extern void blk_queue_end_tag(struct request_queue *, struct request *); | 
|  | 868 | extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *); | 
|  | 869 | extern void blk_queue_free_tags(struct request_queue *); | 
|  | 870 | extern int blk_queue_resize_tags(struct request_queue *, int); | 
|  | 871 | extern void blk_queue_invalidate_tags(struct request_queue *); | 
| James Bottomley | 492dfb4 | 2006-08-30 15:48:45 -0400 | [diff] [blame] | 872 | extern struct blk_queue_tag *blk_init_tags(int); | 
|  | 873 | extern void blk_free_tags(struct blk_queue_tag *); | 
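/*
 * Illustrative sketch (not part of this header): tagged command queuing.
 * blk_queue_init_tags() allocates a tag map (here with no shared map);
 * blk_queue_start_tag() assigns rq->tag and returns nonzero when no tag
 * is free.  The depth and the example_* names are hypothetical.
 */
#if 0
static int example_enable_tcq(struct request_queue *q, int depth)
{
	return blk_queue_init_tags(q, depth, NULL);
}

static void example_issue_tagged(struct request_queue *q, struct request *rq)
{
	/* Called with q->queue_lock held. */
	if (blk_queue_start_tag(q, rq))
		return;			/* out of tags; try again later */
	example_send_to_hw(rq);		/* completion calls blk_queue_end_tag() */
}
#endif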
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 874 |  | 
| David C Somayajulu | f583f49 | 2006-10-04 08:27:25 +0200 | [diff] [blame] | 875 | static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, | 
|  | 876 | int tag) | 
|  | 877 | { | 
|  | 878 | if (unlikely(bqt == NULL || tag >= bqt->real_max_depth)) | 
|  | 879 | return NULL; | 
|  | 880 | return bqt->tag_index[tag]; | 
|  | 881 | } | 
| Christoph Hellwig | dd3932e | 2010-09-16 20:51:46 +0200 | [diff] [blame] | 882 |  | 
|  | 883 | #define BLKDEV_DISCARD_SECURE  0x01    /* secure discard */ | 
|  | 884 |  | 
|  | 885 | extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *); | 
| Dmitry Monakhov | fbd9b09 | 2010-04-28 17:55:06 +0400 | [diff] [blame] | 886 | extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, | 
|  | 887 | sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); | 
| Dmitry Monakhov | 3f14d79 | 2010-04-28 17:55:09 +0400 | [diff] [blame] | 888 | extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, | 
| Christoph Hellwig | dd3932e | 2010-09-16 20:51:46 +0200 | [diff] [blame] | 889 | sector_t nr_sects, gfp_t gfp_mask); | 
| Christoph Hellwig | 2cf6d26 | 2010-08-18 05:29:10 -0400 | [diff] [blame] | 890 | static inline int sb_issue_discard(struct super_block *sb, sector_t block, | 
|  | 891 | sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) | 
| David Woodhouse | fb2dce8 | 2008-08-05 18:01:53 +0100 | [diff] [blame] | 892 | { | 
| Christoph Hellwig | 2cf6d26 | 2010-08-18 05:29:10 -0400 | [diff] [blame] | 893 | return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9), | 
|  | 894 | nr_blocks << (sb->s_blocksize_bits - 9), | 
|  | 895 | gfp_mask, flags); | 
| David Woodhouse | fb2dce8 | 2008-08-05 18:01:53 +0100 | [diff] [blame] | 896 | } | 
| Lukas Czerner | e6fa0be | 2010-10-27 21:30:04 -0400 | [diff] [blame] | 897 | static inline int sb_issue_zeroout(struct super_block *sb, sector_t block, | 
| Theodore Ts'o | a107e5a | 2010-10-27 23:44:47 -0400 | [diff] [blame] | 898 | sector_t nr_blocks, gfp_t gfp_mask) | 
| Lukas Czerner | e6fa0be | 2010-10-27 21:30:04 -0400 | [diff] [blame] | 899 | { | 
|  | 900 | return blkdev_issue_zeroout(sb->s_bdev, | 
|  | 901 | block << (sb->s_blocksize_bits - 9), | 
|  | 902 | nr_blocks << (sb->s_blocksize_bits - 9), | 
| Theodore Ts'o | a107e5a | 2010-10-27 23:44:47 -0400 | [diff] [blame] | 903 | gfp_mask); | 
| Lukas Czerner | e6fa0be | 2010-10-27 21:30:04 -0400 | [diff] [blame] | 904 | } | 
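/*
 * Illustrative sketch (not part of this header): a filesystem discarding
 * a freed extent.  sb_issue_discard() converts filesystem blocks to
 * 512-byte sectors via s_blocksize_bits before calling
 * blkdev_issue_discard().  The helper name is hypothetical.
 */
#if 0
static int example_trim_extent(struct super_block *sb, sector_t block,
			       sector_t nr_blocks)
{
	return sb_issue_discard(sb, block, nr_blocks, GFP_NOFS, 0);
}
#endif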
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 905 |  | 
| Jens Axboe | 018e044 | 2009-06-26 16:27:10 +0200 | [diff] [blame] | 906 | extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); | 
| Adel Gadllah | 0b07de8 | 2008-06-26 13:48:27 +0200 | [diff] [blame] | 907 |  | 
| Martin K. Petersen | eb28d31 | 2010-02-26 00:20:37 -0500 | [diff] [blame] | 908 | enum blk_default_limits { | 
|  | 909 | BLK_MAX_SEGMENTS	= 128, | 
|  | 910 | BLK_SAFE_MAX_SECTORS	= 255, | 
|  | 911 | BLK_DEF_MAX_SECTORS	= 1024, | 
|  | 912 | BLK_MAX_SEGMENT_SIZE	= 65536, | 
|  | 913 | BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL, | 
|  | 914 | }; | 
| Milan Broz | 0e435ac | 2008-12-03 12:55:08 +0100 | [diff] [blame] | 915 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 916 | #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) | 
|  | 917 |  | 
| Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 918 | static inline unsigned long queue_bounce_pfn(struct request_queue *q) | 
|  | 919 | { | 
| Martin K. Petersen | 025146e | 2009-05-22 17:17:51 -0400 | [diff] [blame] | 920 | return q->limits.bounce_pfn; | 
| Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 921 | } | 
|  | 922 |  | 
|  | 923 | static inline unsigned long queue_segment_boundary(struct request_queue *q) | 
|  | 924 | { | 
| Martin K. Petersen | 025146e | 2009-05-22 17:17:51 -0400 | [diff] [blame] | 925 | return q->limits.seg_boundary_mask; | 
| Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 926 | } | 
|  | 927 |  | 
|  | 928 | static inline unsigned int queue_max_sectors(struct request_queue *q) | 
|  | 929 | { | 
| Martin K. Petersen | 025146e | 2009-05-22 17:17:51 -0400 | [diff] [blame] | 930 | return q->limits.max_sectors; | 
| Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 931 | } | 
|  | 932 |  | 
|  | 933 | static inline unsigned int queue_max_hw_sectors(struct request_queue *q) | 
|  | 934 | { | 
| Martin K. Petersen | 025146e | 2009-05-22 17:17:51 -0400 | [diff] [blame] | 935 | return q->limits.max_hw_sectors; | 
| Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 936 | } | 
|  | 937 |  | 
| Martin K. Petersen | 8a78362 | 2010-02-26 00:20:39 -0500 | [diff] [blame] | 938 | static inline unsigned short queue_max_segments(struct request_queue *q) | 
| Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 939 | { | 
| Martin K. Petersen | 8a78362 | 2010-02-26 00:20:39 -0500 | [diff] [blame] | 940 | return q->limits.max_segments; | 
| Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 941 | } | 
|  | 942 |  | 
|  | 943 | static inline unsigned int queue_max_segment_size(struct request_queue *q) | 
|  | 944 | { | 
| Martin K. Petersen | 025146e | 2009-05-22 17:17:51 -0400 | [diff] [blame] | 945 | return q->limits.max_segment_size; | 
| Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 946 | } | 
|  | 947 |  | 
| Martin K. Petersen | e1defc4 | 2009-05-22 17:17:49 -0400 | [diff] [blame] | 948 | static inline unsigned short queue_logical_block_size(struct request_queue *q) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 949 | { | 
|  | 950 | int retval = 512; | 
|  | 951 |  | 
| Martin K. Petersen | 025146e | 2009-05-22 17:17:51 -0400 | [diff] [blame] | 952 | if (q && q->limits.logical_block_size) | 
|  | 953 | retval = q->limits.logical_block_size; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 954 |  | 
|  | 955 | return retval; | 
|  | 956 | } | 
|  | 957 |  | 
| Martin K. Petersen | e1defc4 | 2009-05-22 17:17:49 -0400 | [diff] [blame] | 958 | static inline unsigned short bdev_logical_block_size(struct block_device *bdev) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 959 | { | 
| Martin K. Petersen | e1defc4 | 2009-05-22 17:17:49 -0400 | [diff] [blame] | 960 | return queue_logical_block_size(bdev_get_queue(bdev)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 961 | } | 
|  | 962 |  | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 963 | static inline unsigned int queue_physical_block_size(struct request_queue *q) | 
|  | 964 | { | 
|  | 965 | return q->limits.physical_block_size; | 
|  | 966 | } | 
|  | 967 |  | 
| Martin K. Petersen | 892b6f9 | 2010-10-13 21:18:03 +0200 | [diff] [blame] | 968 | static inline unsigned int bdev_physical_block_size(struct block_device *bdev) | 
| Martin K. Petersen | ac481c2 | 2009-10-03 20:52:01 +0200 | [diff] [blame] | 969 | { | 
|  | 970 | return queue_physical_block_size(bdev_get_queue(bdev)); | 
|  | 971 | } | 
|  | 972 |  | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 973 | static inline unsigned int queue_io_min(struct request_queue *q) | 
|  | 974 | { | 
|  | 975 | return q->limits.io_min; | 
|  | 976 | } | 
|  | 977 |  | 
| Martin K. Petersen | ac481c2 | 2009-10-03 20:52:01 +0200 | [diff] [blame] | 978 | static inline int bdev_io_min(struct block_device *bdev) | 
|  | 979 | { | 
|  | 980 | return queue_io_min(bdev_get_queue(bdev)); | 
|  | 981 | } | 
|  | 982 |  | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 983 | static inline unsigned int queue_io_opt(struct request_queue *q) | 
|  | 984 | { | 
|  | 985 | return q->limits.io_opt; | 
|  | 986 | } | 
|  | 987 |  | 
| Martin K. Petersen | ac481c2 | 2009-10-03 20:52:01 +0200 | [diff] [blame] | 988 | static inline int bdev_io_opt(struct block_device *bdev) | 
|  | 989 | { | 
|  | 990 | return queue_io_opt(bdev_get_queue(bdev)); | 
|  | 991 | } | 
|  | 992 |  | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 993 | static inline int queue_alignment_offset(struct request_queue *q) | 
|  | 994 | { | 
| Martin K. Petersen | ac481c2 | 2009-10-03 20:52:01 +0200 | [diff] [blame] | 995 | if (q->limits.misaligned) | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 996 | return -1; | 
|  | 997 |  | 
| Martin K. Petersen | ac481c2 | 2009-10-03 20:52:01 +0200 | [diff] [blame] | 998 | return q->limits.alignment_offset; | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 999 | } | 
|  | 1000 |  | 
| Martin K. Petersen | e03a72e | 2010-01-11 03:21:51 -0500 | [diff] [blame] | 1001 | static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector) | 
| Martin K. Petersen | 81744ee | 2009-12-29 08:35:35 +0100 | [diff] [blame] | 1002 | { | 
|  | 1003 | unsigned int granularity = max(lim->physical_block_size, lim->io_min); | 
| Martin K. Petersen | e03a72e | 2010-01-11 03:21:51 -0500 | [diff] [blame] | 1004 | unsigned int alignment = (sector << 9) & (granularity - 1); | 
| Martin K. Petersen | 81744ee | 2009-12-29 08:35:35 +0100 | [diff] [blame] | 1005 |  | 
| Martin K. Petersen | e03a72e | 2010-01-11 03:21:51 -0500 | [diff] [blame] | 1006 | return (granularity + lim->alignment_offset - alignment) | 
|  | 1007 | & (granularity - 1); | 
| Martin K. Petersen | c72758f | 2009-05-22 17:17:53 -0400 | [diff] [blame] | 1008 | } | 
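/*
 * Worked example (illustrative): with physical_block_size = 4096,
 * io_min = 0 and alignment_offset = 0, a request starting at sector 9
 * yields granularity = 4096 and alignment = (9 << 9) & 4095 = 512, so
 * the function returns (4096 + 0 - 512) & 4095 = 3584 bytes (seven
 * sectors) to the next aligned boundary at byte offset 8192.
 */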
|  | 1009 |  | 
| Martin K. Petersen | ac481c2 | 2009-10-03 20:52:01 +0200 | [diff] [blame] | 1010 | static inline int bdev_alignment_offset(struct block_device *bdev) | 
|  | 1011 | { | 
|  | 1012 | struct request_queue *q = bdev_get_queue(bdev); | 
|  | 1013 |  | 
|  | 1014 | if (q->limits.misaligned) | 
|  | 1015 | return -1; | 
|  | 1016 |  | 
|  | 1017 | if (bdev != bdev->bd_contains) | 
|  | 1018 | return bdev->bd_part->alignment_offset; | 
|  | 1019 |  | 
|  | 1020 | return q->limits.alignment_offset; | 
|  | 1021 | } | 
|  | 1022 |  | 
| Martin K. Petersen | 86b3728 | 2009-11-10 11:50:21 +0100 | [diff] [blame] | 1023 | static inline int queue_discard_alignment(struct request_queue *q) | 
|  | 1024 | { | 
|  | 1025 | if (q->limits.discard_misaligned) | 
|  | 1026 | return -1; | 
|  | 1027 |  | 
|  | 1028 | return q->limits.discard_alignment; | 
|  | 1029 | } | 
|  | 1030 |  | 
| Martin K. Petersen | e03a72e | 2010-01-11 03:21:51 -0500 | [diff] [blame] | 1031 | static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector) | 
| Martin K. Petersen | 86b3728 | 2009-11-10 11:50:21 +0100 | [diff] [blame] | 1032 | { | 
| Martin K. Petersen | dd3d145 | 2010-01-11 03:21:48 -0500 | [diff] [blame] | 1033 | unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1); | 
|  | 1034 |  | 
|  | 1035 | return (lim->discard_granularity + lim->discard_alignment - alignment) | 
|  | 1036 | & (lim->discard_granularity - 1); | 
| Martin K. Petersen | 86b3728 | 2009-11-10 11:50:21 +0100 | [diff] [blame] | 1037 | } | 
|  | 1038 |  | 
| Martin K. Petersen | 98262f2 | 2009-12-03 09:24:48 +0100 | [diff] [blame] | 1039 | static inline unsigned int queue_discard_zeroes_data(struct request_queue *q) | 
|  | 1040 | { | 
|  | 1041 | if (q->limits.discard_zeroes_data == 1) | 
|  | 1042 | return 1; | 
|  | 1043 |  | 
|  | 1044 | return 0; | 
|  | 1045 | } | 
|  | 1046 |  | 
|  | 1047 | static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev) | 
|  | 1048 | { | 
|  | 1049 | return queue_discard_zeroes_data(bdev_get_queue(bdev)); | 
|  | 1050 | } | 
|  | 1051 |  | 
| Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 1052 | static inline int queue_dma_alignment(struct request_queue *q) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1053 | { | 
| Pete Wyckoff | 482eb68 | 2008-01-01 10:23:02 -0500 | [diff] [blame] | 1054 | return q ? q->dma_alignment : 511; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1055 | } | 
|  | 1056 |  | 
| Namhyung Kim | 1441779 | 2010-09-15 13:08:27 +0200 | [diff] [blame] | 1057 | static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, | 
| FUJITA Tomonori | 8790407 | 2008-08-28 15:05:58 +0900 | [diff] [blame] | 1058 | unsigned int len) | 
|  | 1059 | { | 
|  | 1060 | unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; | 
| Namhyung Kim | 1441779 | 2010-09-15 13:08:27 +0200 | [diff] [blame] | 1061 | return !(addr & alignment) && !(len & alignment); | 
| FUJITA Tomonori | 8790407 | 2008-08-28 15:05:58 +0900 | [diff] [blame] | 1062 | } | 
|  | 1063 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1064 | /* assumes size > 256 */ | 
|  | 1065 | static inline unsigned int blksize_bits(unsigned int size) | 
|  | 1066 | { | 
|  | 1067 | unsigned int bits = 8; | 
|  | 1068 | do { | 
|  | 1069 | bits++; | 
|  | 1070 | size >>= 1; | 
|  | 1071 | } while (size > 256); | 
|  | 1072 | return bits; | 
|  | 1073 | } | 
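/*
 * Worked example (illustrative): blksize_bits(4096) shifts size through
 * 2048, 1024, 512, 256 while bits climbs from 8 to 12, so it returns
 * 12 == log2(4096).
 */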
|  | 1074 |  | 
| Adrian Bunk | 2befb9e | 2005-09-10 00:27:17 -0700 | [diff] [blame] | 1075 | static inline unsigned int block_size(struct block_device *bdev) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1076 | { | 
|  | 1077 | return bdev->bd_block_size; | 
|  | 1078 | } | 
|  | 1079 |  | 
|  | 1080 | typedef struct {struct page *v;} Sector; | 
|  | 1081 |  | 
|  | 1082 | unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *); | 
|  | 1083 |  | 
|  | 1084 | static inline void put_dev_sector(Sector p) | 
|  | 1085 | { | 
|  | 1086 | page_cache_release(p.v); | 
|  | 1087 | } | 
|  | 1088 |  | 
|  | 1089 | struct work_struct; | 
| Jens Axboe | 18887ad | 2008-07-28 13:08:45 +0200 | [diff] [blame] | 1090 | int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); | 
| Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 1091 | int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1092 |  | 
| Divyesh Shah | 9195291 | 2010-04-01 15:01:41 -0700 | [diff] [blame] | 1093 | #ifdef CONFIG_BLK_CGROUP | 
| Jens Axboe | 28f4197 | 2010-06-01 12:23:18 +0200 | [diff] [blame] | 1094 | /* | 
|  | 1095 | * This should not be using sched_clock(). A real patch is in progress | 
|  | 1096 | * to fix this up; until that is in place we need to disable preemption | 
|  | 1097 | * around sched_clock() in this function and in set_io_start_time_ns(). | 
|  | 1098 | */ | 
| Divyesh Shah | 9195291 | 2010-04-01 15:01:41 -0700 | [diff] [blame] | 1099 | static inline void set_start_time_ns(struct request *req) | 
|  | 1100 | { | 
| Jens Axboe | 28f4197 | 2010-06-01 12:23:18 +0200 | [diff] [blame] | 1101 | preempt_disable(); | 
| Divyesh Shah | 9195291 | 2010-04-01 15:01:41 -0700 | [diff] [blame] | 1102 | req->start_time_ns = sched_clock(); | 
| Jens Axboe | 28f4197 | 2010-06-01 12:23:18 +0200 | [diff] [blame] | 1103 | preempt_enable(); | 
| Divyesh Shah | 9195291 | 2010-04-01 15:01:41 -0700 | [diff] [blame] | 1104 | } | 
|  | 1105 |  | 
|  | 1106 | static inline void set_io_start_time_ns(struct request *req) | 
|  | 1107 | { | 
| Jens Axboe | 28f4197 | 2010-06-01 12:23:18 +0200 | [diff] [blame] | 1108 | preempt_disable(); | 
| Divyesh Shah | 9195291 | 2010-04-01 15:01:41 -0700 | [diff] [blame] | 1109 | req->io_start_time_ns = sched_clock(); | 
| Jens Axboe | 28f4197 | 2010-06-01 12:23:18 +0200 | [diff] [blame] | 1110 | preempt_enable(); | 
| Divyesh Shah | 9195291 | 2010-04-01 15:01:41 -0700 | [diff] [blame] | 1111 | } | 
| Divyesh Shah | 84c124d | 2010-04-09 08:31:19 +0200 | [diff] [blame] | 1112 |  | 
|  | 1113 | static inline uint64_t rq_start_time_ns(struct request *req) | 
|  | 1114 | { | 
|  | 1115 | return req->start_time_ns; | 
|  | 1116 | } | 
|  | 1117 |  | 
|  | 1118 | static inline uint64_t rq_io_start_time_ns(struct request *req) | 
|  | 1119 | { | 
|  | 1120 | return req->io_start_time_ns; | 
|  | 1121 | } | 
| Divyesh Shah | 9195291 | 2010-04-01 15:01:41 -0700 | [diff] [blame] | 1122 | #else | 
|  | 1123 | static inline void set_start_time_ns(struct request *req) {} | 
|  | 1124 | static inline void set_io_start_time_ns(struct request *req) {} | 
| Divyesh Shah | 84c124d | 2010-04-09 08:31:19 +0200 | [diff] [blame] | 1125 | static inline uint64_t rq_start_time_ns(struct request *req) | 
|  | 1126 | { | 
|  | 1127 | return 0; | 
|  | 1128 | } | 
|  | 1129 | static inline uint64_t rq_io_start_time_ns(struct request *req) | 
|  | 1130 | { | 
|  | 1131 | return 0; | 
|  | 1132 | } | 
| Divyesh Shah | 9195291 | 2010-04-01 15:01:41 -0700 | [diff] [blame] | 1133 | #endif | 
|  | 1134 |  | 
| Vivek Goyal | e43473b | 2010-09-15 17:06:35 -0400 | [diff] [blame] | 1135 | #ifdef CONFIG_BLK_DEV_THROTTLING | 
|  | 1136 | extern int blk_throtl_init(struct request_queue *q); | 
|  | 1137 | extern void blk_throtl_exit(struct request_queue *q); | 
|  | 1138 | extern int blk_throtl_bio(struct request_queue *q, struct bio **bio); | 
|  | 1139 | extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay); | 
|  | 1140 | extern void throtl_shutdown_timer_wq(struct request_queue *q); | 
|  | 1141 | #else /* CONFIG_BLK_DEV_THROTTLING */ | 
|  | 1142 | static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio) | 
|  | 1143 | { | 
|  | 1144 | return 0; | 
|  | 1145 | } | 
|  | 1146 |  | 
|  | 1147 | static inline int blk_throtl_init(struct request_queue *q) { return 0; } | 
|  | 1148 | static inline void blk_throtl_exit(struct request_queue *q) {} | 
|  | 1149 | static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {} | 
|  | 1150 | static inline void throtl_shutdown_timer_wq(struct request_queue *q) {} | 
|  | 1151 | #endif /* CONFIG_BLK_DEV_THROTTLING */ | 
|  | 1152 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1153 | #define MODULE_ALIAS_BLOCKDEV(major,minor) \ | 
|  | 1154 | MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) | 
|  | 1155 | #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ | 
|  | 1156 | MODULE_ALIAS("block-major-" __stringify(major) "-*") | 
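/*
 * Illustrative use (hypothetical driver): a module owning an entire
 * major number can be demand-loaded through its alias, e.g.
 *
 *	MODULE_ALIAS_BLOCKDEV_MAJOR(EXAMPLE_MAJOR)
 *
 * which expands to MODULE_ALIAS("block-major-<EXAMPLE_MAJOR>-*").
 */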
|  | 1157 |  | 
| Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1158 | #if defined(CONFIG_BLK_DEV_INTEGRITY) | 
|  | 1159 |  | 
| Jens Axboe | b24498d | 2008-06-27 09:12:09 +0200 | [diff] [blame] | 1160 | #define INTEGRITY_FLAG_READ	2	/* verify data integrity on read */ | 
|  | 1161 | #define INTEGRITY_FLAG_WRITE	4	/* generate data integrity on write */ | 
| Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1162 |  | 
|  | 1163 | struct blk_integrity_exchg { | 
|  | 1164 | void			*prot_buf; | 
|  | 1165 | void			*data_buf; | 
|  | 1166 | sector_t		sector; | 
|  | 1167 | unsigned int		data_size; | 
|  | 1168 | unsigned short		sector_size; | 
|  | 1169 | const char		*disk_name; | 
|  | 1170 | }; | 
|  | 1171 |  | 
|  | 1172 | typedef void (integrity_gen_fn) (struct blk_integrity_exchg *); | 
|  | 1173 | typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *); | 
|  | 1174 | typedef void (integrity_set_tag_fn) (void *, void *, unsigned int); | 
|  | 1175 | typedef void (integrity_get_tag_fn) (void *, void *, unsigned int); | 
|  | 1176 |  | 
|  | 1177 | struct blk_integrity { | 
|  | 1178 | integrity_gen_fn	*generate_fn; | 
|  | 1179 | integrity_vrfy_fn	*verify_fn; | 
|  | 1180 | integrity_set_tag_fn	*set_tag_fn; | 
|  | 1181 | integrity_get_tag_fn	*get_tag_fn; | 
|  | 1182 |  | 
|  | 1183 | unsigned short		flags; | 
|  | 1184 | unsigned short		tuple_size; | 
|  | 1185 | unsigned short		sector_size; | 
|  | 1186 | unsigned short		tag_size; | 
|  | 1187 |  | 
|  | 1188 | const char		*name; | 
|  | 1189 |  | 
|  | 1190 | struct kobject		kobj; | 
|  | 1191 | }; | 
|  | 1192 |  | 
|  | 1193 | extern int blk_integrity_register(struct gendisk *, struct blk_integrity *); | 
|  | 1194 | extern void blk_integrity_unregister(struct gendisk *); | 
| Martin K. Petersen | ad7fce9 | 2008-10-01 03:38:39 -0400 | [diff] [blame] | 1195 | extern int blk_integrity_compare(struct gendisk *, struct gendisk *); | 
| Martin K. Petersen | 13f05c8 | 2010-09-10 20:50:10 +0200 | [diff] [blame] | 1196 | extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, | 
|  | 1197 | struct scatterlist *); | 
|  | 1198 | extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); | 
|  | 1199 | extern int blk_integrity_merge_rq(struct request_queue *, struct request *, | 
|  | 1200 | struct request *); | 
|  | 1201 | extern int blk_integrity_merge_bio(struct request_queue *, struct request *, | 
|  | 1202 | struct bio *); | 
| Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1203 |  | 
| Jens Axboe | b04accc | 2008-10-02 12:53:22 +0200 | [diff] [blame] | 1204 | static inline | 
|  | 1205 | struct blk_integrity *bdev_get_integrity(struct block_device *bdev) | 
|  | 1206 | { | 
|  | 1207 | return bdev->bd_disk->integrity; | 
|  | 1208 | } | 
|  | 1209 |  | 
| Martin K. Petersen | b02739b | 2008-10-02 18:47:49 +0200 | [diff] [blame] | 1210 | static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) | 
|  | 1211 | { | 
|  | 1212 | return disk->integrity; | 
|  | 1213 | } | 
|  | 1214 |  | 
| Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1215 | static inline int blk_integrity_rq(struct request *rq) | 
|  | 1216 | { | 
| Martin K. Petersen | d442cc4 | 2008-07-16 16:09:06 -0400 | [diff] [blame] | 1217 | if (rq->bio == NULL) | 
|  | 1218 | return 0; | 
|  | 1219 |  | 
| Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1220 | return bio_integrity(rq->bio); | 
|  | 1221 | } | 
|  | 1222 |  | 
| Martin K. Petersen | 13f05c8 | 2010-09-10 20:50:10 +0200 | [diff] [blame] | 1223 | static inline void blk_queue_max_integrity_segments(struct request_queue *q, | 
|  | 1224 | unsigned int segs) | 
|  | 1225 | { | 
|  | 1226 | q->limits.max_integrity_segments = segs; | 
|  | 1227 | } | 
|  | 1228 |  | 
|  | 1229 | static inline unsigned short | 
|  | 1230 | queue_max_integrity_segments(struct request_queue *q) | 
|  | 1231 | { | 
|  | 1232 | return q->limits.max_integrity_segments; | 
|  | 1233 | } | 
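/*
 * Illustrative sketch (not part of this header): registering an
 * integrity profile so the block layer generates protection data on
 * write and verifies it on read.  The callbacks, tuple size, and
 * template name are hypothetical.
 */
#if 0
static struct blk_integrity example_integrity = {
	.name		= "EXAMPLE-CRC",
	.generate_fn	= example_generate,	/* fill prot_buf on write */
	.verify_fn	= example_verify,	/* check prot_buf on read */
	.tuple_size	= 8,			/* metadata bytes per sector */
	.tag_size	= 0,
};

static int example_register_integrity(struct gendisk *disk)
{
	return blk_integrity_register(disk, &example_integrity);
}
#endif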
|  | 1234 |  | 
| Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1235 | #else /* CONFIG_BLK_DEV_INTEGRITY */ | 
|  | 1236 |  | 
|  | 1237 | #define blk_integrity_rq(rq)			(0) | 
| Martin K. Petersen | 13f05c8 | 2010-09-10 20:50:10 +0200 | [diff] [blame] | 1238 | #define blk_rq_count_integrity_sg(a, b)		(0) | 
|  | 1239 | #define blk_rq_map_integrity_sg(a, b, c)	(0) | 
| Jens Axboe | b04accc | 2008-10-02 12:53:22 +0200 | [diff] [blame] | 1240 | #define bdev_get_integrity(a)			(NULL) | 
| Martin K. Petersen | b02739b | 2008-10-02 18:47:49 +0200 | [diff] [blame] | 1241 | #define blk_get_integrity(a)			(NULL) | 
| Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1242 | #define blk_integrity_compare(a, b)		(0) | 
|  | 1243 | #define blk_integrity_register(a, b)		(0) | 
|  | 1244 | #define blk_integrity_unregister(a)		do { } while (0) | 
| Martin K. Petersen | 13f05c8 | 2010-09-10 20:50:10 +0200 | [diff] [blame] | 1245 | #define blk_queue_max_integrity_segments(a, b)	do { } while (0) | 
|  | 1246 | #define queue_max_integrity_segments(a)		(0) | 
|  | 1247 | #define blk_integrity_merge_rq(a, b, c)		(0) | 
|  | 1248 | #define blk_integrity_merge_bio(a, b, c)	(0) | 
| Martin K. Petersen | 7ba1ba1 | 2008-06-30 20:04:41 +0200 | [diff] [blame] | 1249 |  | 
|  | 1250 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ | 
|  | 1251 |  | 
| Al Viro | 08f8585 | 2007-10-08 13:26:20 -0400 | [diff] [blame] | 1252 | struct block_device_operations { | 
| Al Viro | d4430d62 | 2008-03-02 09:09:22 -0500 | [diff] [blame] | 1253 | int (*open) (struct block_device *, fmode_t); | 
|  | 1254 | int (*release) (struct gendisk *, fmode_t); | 
| Al Viro | d4430d62 | 2008-03-02 09:09:22 -0500 | [diff] [blame] | 1255 | int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); | 
|  | 1256 | int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); | 
| Al Viro | 08f8585 | 2007-10-08 13:26:20 -0400 | [diff] [blame] | 1257 | int (*direct_access) (struct block_device *, sector_t, | 
|  | 1258 | void **, unsigned long *); | 
|  | 1259 | int (*media_changed) (struct gendisk *); | 
| Tejun Heo | c3e33e0 | 2010-05-15 20:09:29 +0200 | [diff] [blame] | 1260 | void (*unlock_native_capacity) (struct gendisk *); | 
| Al Viro | 08f8585 | 2007-10-08 13:26:20 -0400 | [diff] [blame] | 1261 | int (*revalidate_disk) (struct gendisk *); | 
|  | 1262 | int (*getgeo)(struct block_device *, struct hd_geometry *); | 
| Nitin Gupta | b3a27d0 | 2010-05-17 11:02:43 +0530 | [diff] [blame] | 1263 | /* this callback is called with swap_lock held, and sometimes the page table lock as well */ | 
|  | 1264 | void (*swap_slot_free_notify) (struct block_device *, unsigned long); | 
| Al Viro | 08f8585 | 2007-10-08 13:26:20 -0400 | [diff] [blame] | 1265 | struct module *owner; | 
|  | 1266 | }; | 
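/*
 * Illustrative sketch (not part of this header): a minimal driver's
 * operations table.  Only the hooks the device needs are filled in;
 * the example_* implementations are hypothetical.
 */
#if 0
static const struct block_device_operations example_fops = {
	.owner		= THIS_MODULE,
	.open		= example_open,		/* int (*)(struct block_device *, fmode_t) */
	.release	= example_release,	/* int (*)(struct gendisk *, fmode_t) */
	.getgeo		= example_getgeo,	/* legacy HDIO_GETGEO support */
};
#endif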
|  | 1267 |  | 
| Al Viro | 633a08b | 2007-08-29 20:34:12 -0400 | [diff] [blame] | 1268 | extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, | 
|  | 1269 | unsigned long); | 
| David Howells | 9361401 | 2006-09-30 20:45:40 +0200 | [diff] [blame] | 1270 | #else /* CONFIG_BLOCK */ | 
|  | 1271 | /* | 
|  | 1272 | * stubs for when the block layer is configured out | 
|  | 1273 | */ | 
|  | 1274 | #define buffer_heads_over_limit 0 | 
|  | 1275 |  | 
| David Howells | 9361401 | 2006-09-30 20:45:40 +0200 | [diff] [blame] | 1276 | static inline long nr_blockdev_pages(void) | 
|  | 1277 | { | 
|  | 1278 | return 0; | 
|  | 1279 | } | 
|  | 1280 |  | 
| David Howells | 9361401 | 2006-09-30 20:45:40 +0200 | [diff] [blame] | 1281 | #endif /* CONFIG_BLOCK */ | 
|  | 1282 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1283 | #endif |