#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>

#include <asm/scatterlist.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

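/*
 * Per-queue free-request accounting.  count[], starved[] and wait[] are
 * indexed by data direction (BLK_RW_SYNC/BLK_RW_ASYNC).
 */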
struct request_list {
	int count[2];
	int starved[2];
	int elvpriv;
	mempool_t *rq_pool;
	wait_queue_head_t wait[2];
};

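/*
 * Request command types: regular filesystem I/O (FS), SCSI passthrough
 * (BLOCK_PC), power management, driver-private (SPECIAL) and the legacy
 * ATA types.
 */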
enum rq_cmd_type_bits {
	REQ_TYPE_FS = 1,
	REQ_TYPE_BLOCK_PC,
	REQ_TYPE_SENSE,
	REQ_TYPE_PM_SUSPEND,
	REQ_TYPE_PM_RESUME,
	REQ_TYPE_PM_SHUTDOWN,
	REQ_TYPE_SPECIAL,
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};

#define BLK_MAX_CDB	16

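/*
 * The main I/O unit seen by block drivers.  Try to keep fields that are
 * referenced together in the same cacheline.
 */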
struct request {
	struct list_head queuelist;
	struct call_single_data csd;

	struct request_queue *q;

	unsigned int cmd_flags;
	enum rq_cmd_type_bits cmd_type;
	unsigned long atomic_flags;

	int cpu;

	unsigned int __data_len;
	sector_t __sector;

	struct bio *bio;
	struct bio *biotail;

	struct hlist_node hash;
	union {
		struct rb_node rb_node;
		void *completion_data;
	};

	union {
		struct {
			struct io_cq *icq;
			void *priv[2];
		} elv;

		struct {
			unsigned int seq;
			struct list_head list;
			rq_end_io_fn *saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;
#endif
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	int ref_count;

	void *special;
	char *buffer;

	int tag;
	int errors;

	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;
	unsigned short cmd_len;

	unsigned int extra_len;
	unsigned int sense_len;
	unsigned int resid_len;
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	rq_end_io_fn *end_io;
	void *end_io_data;

	struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

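/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests.
 */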
struct request_pm_state
{
	int pm_step;

	u32 pm_state;
	void *data;
};

#include <linux/elevator.h>

typedef void (request_fn_proc) (struct request_queue *q);
typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
struct bvec_merge_data {
	struct block_device *bi_bdev;
	sector_t bi_sector;
	unsigned bi_size;
	unsigned long bi_rw;
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
			     struct bio_vec *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;
	unsigned long *tag_map;
	int busy;
	int max_depth;
	int real_max_depth;
	atomic_t refcnt;
};

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

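/*
 * Hardware queue limits.  Stacking drivers (dm/md) combine the limits of
 * their component devices via blk_stack_limits().
 */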
struct queue_limits {
	unsigned long bounce_pfn;
	unsigned long seg_boundary_mask;

	unsigned int max_hw_sectors;
	unsigned int max_sectors;
	unsigned int max_segment_size;
	unsigned int physical_block_size;
	unsigned int alignment_offset;
	unsigned int io_min;
	unsigned int io_opt;
	unsigned int max_discard_sectors;
	unsigned int discard_granularity;
	unsigned int discard_alignment;

	unsigned short logical_block_size;
	unsigned short max_segments;
	unsigned short max_integrity_segments;

	unsigned char misaligned;
	unsigned char discard_misaligned;
	unsigned char cluster;
	unsigned char discard_zeroes_data;
};

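/*
 * The per-device request queue: dispatch list, elevator state, queue
 * limits and the driver entry points registered via the blk_queue_*()
 * setters below.
 */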
struct request_queue {
	struct list_head queue_head;
	struct request *last_merge;
	struct elevator_queue *elevator;

	struct request_list rq;

	request_fn_proc *request_fn;
	make_request_fn *make_request_fn;
	prep_rq_fn *prep_rq_fn;
	unprep_rq_fn *unprep_rq_fn;
	merge_bvec_fn *merge_bvec_fn;
	softirq_done_fn *softirq_done_fn;
	rq_timed_out_fn *rq_timed_out_fn;
	dma_drain_needed_fn *dma_drain_needed;
	lld_busy_fn *lld_busy_fn;

	sector_t end_sector;
	struct request *boundary_rq;

	struct delayed_work delay_work;

	struct backing_dev_info backing_dev_info;

	void *queuedata;

	unsigned long queue_flags;

	int id;

	gfp_t bounce_gfp;

	spinlock_t __queue_lock;
	spinlock_t *queue_lock;

	struct kobject kobj;

	unsigned long nr_requests;
	unsigned int nr_congestion_on;
	unsigned int nr_congestion_off;
	unsigned int nr_batching;

	unsigned int dma_drain_size;
	void *dma_drain_buffer;
	unsigned int dma_pad_mask;
	unsigned int dma_alignment;

	struct blk_queue_tag *queue_tags;
	struct list_head tag_busy_list;

	unsigned int nr_sorted;
	unsigned int in_flight[2];

	unsigned int rq_timeout;
	struct timer_list timeout;
	struct list_head timeout_list;

	struct list_head icq_list;

	struct queue_limits limits;

	unsigned int sg_timeout;
	unsigned int sg_reserved_size;
	int node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace *blk_trace;
#endif
	unsigned int flush_flags;
	unsigned int flush_not_queueable:1;
	unsigned int flush_queue_delayed:1;
	unsigned int flush_pending_idx:1;
	unsigned int flush_running_idx:1;
	unsigned long flush_pending_since;
	struct list_head flush_queue[2];
	struct list_head flush_data_in_flight;
	struct request flush_rq;

	struct mutex sysfs_lock;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn *bsg_job_fn;
	int bsg_job_size;
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	struct throtl_data *td;
#endif
};

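/* queue_flags bit numbers; manipulate them with the queue_flag_*() helpers below */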
#define QUEUE_FLAG_QUEUED	1
#define QUEUE_FLAG_STOPPED	2
#define QUEUE_FLAG_SYNCFULL	3
#define QUEUE_FLAG_ASYNCFULL	4
#define QUEUE_FLAG_DEAD		5
#define QUEUE_FLAG_ELVSWITCH	6
#define QUEUE_FLAG_BIDI		7
#define QUEUE_FLAG_NOMERGES	8
#define QUEUE_FLAG_SAME_COMP	9
#define QUEUE_FLAG_FAIL_IO	10
#define QUEUE_FLAG_STACKABLE	11
#define QUEUE_FLAG_NONROT	12
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT
#define QUEUE_FLAG_IO_STAT	13
#define QUEUE_FLAG_DISCARD	14
#define QUEUE_FLAG_NOXMERGES	15
#define QUEUE_FLAG_ADD_RANDOM	16
#define QUEUE_FLAG_SECDISCARD	17
#define QUEUE_FLAG_SAME_FORCE	18
#define QUEUE_FLAG_SANITIZE	19

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE) |		\
				 (1 << QUEUE_FLAG_SAME_COMP) |		\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}

#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_sanitize(q)	test_bit(QUEUE_FLAG_SANITIZE, &(q)->queue_flags)
#define blk_queue_secdiscard(q)	(blk_queue_discard(q) && \
	test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
	(((rq)->cmd_flags & REQ_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS || \
	  ((rq)->cmd_flags & REQ_DISCARD)))

#define blk_pm_request(rq) \
	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
	 (rq)->cmd_type == REQ_TYPE_PM_RESUME)

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((rq)->cmd_flags & 1)

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

static inline bool rw_is_sync(unsigned int rw_flags)
{
	return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(rq->cmd_flags);
}

static inline int blk_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
	return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
	else
		queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
}

static inline void blk_clear_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
	else
		queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
}

#define RQ_NOMERGE_FLAGS	\
	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
#define rq_mergeable(rq)	\
	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
	 (((rq)->cmd_flags & REQ_DISCARD) || \
	  (rq)->cmd_type == REQ_TYPE_FS))

#define BLKPREP_OK	0
#define BLKPREP_KILL	1
#define BLKPREP_DEFER	2

extern unsigned long blk_max_low_pfn, blk_max_pfn;

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	int i;
	struct bio *bio;
};

#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)		\
	__rq_for_each_bio(_iter.bio, _rq)		\
		bio_for_each_segment(bvl, _iter.bio, _iter.i)

#define rq_iter_last(rq, _iter)				\
	(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern struct request *blk_make_request(struct request_queue *, struct bio *,
					gfp_t);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
		unsigned int len);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

extern void blk_queue_bio(struct request_queue *q, struct bio *bio);

static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
{
	clear_bdi_congested(&q->backing_dev_info, sync);
}

static inline void blk_set_queue_congested(struct request_queue *q, int sync)
{
	set_bdi_congested(&q->backing_dev_info, sync);
}

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *,
			   unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, struct sg_iovec *, int,
			       unsigned int, gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

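/*
 * blk_rq_pos()		: the current sector
 * blk_rq_bytes()	: bytes left in the entire request
 * blk_rq_cur_bytes()	: bytes left in the current segment
 * blk_rq_err_bytes()	: bytes left till the next error boundary
 * blk_rq_sectors()	: sectors left in the entire request
 * blk_rq_cur_sectors()	: sectors left in the current segment
 */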
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}

extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);

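/*
 * Request completion.  blk_update_request() completes the given number of
 * bytes without finishing the request.  The blk_end_request* variants take
 * the queue lock themselves; the __blk_end_request* variants must be called
 * with the queue lock already held.
 */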
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_abort_queue(struct request_queue *);
extern void blk_unprep_request(struct request *);

extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
						      request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
			     sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);

bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);

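/*
 * blk_plug lets a task queue up related requests on a per-task list so that
 * sequential I/O can be merged into larger requests before the batch is
 * handed to the device, reducing request_queue lock contention.
 */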
struct blk_plug {
	unsigned long magic;
	struct list_head list;
	struct list_head cb_list;
	unsigned int should_sort;
};
#define BLK_MAX_REQUEST_COUNT 16

struct blk_plug_cb {
	struct list_head list;
	void (*callback)(struct blk_plug_cb *);
};

extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);

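/*
 * Typical plugging usage (a minimal sketch; submit_bio() stands in for any
 * request submission path):
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	submit_bio(rw, bio);		// held on current->plug for merging
 *	blk_finish_plug(&plug);		// flushes the plugged list to the driver
 */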
static inline void blk_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, true);
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list));
}

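/* tagged command queueing support */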
#define blk_rq_tagged(rq)	((rq)->cmd_flags & REQ_QUEUED)
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						     int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}

#define BLKDEV_DISCARD_SECURE	0x01

extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int blkdev_issue_sanitize(struct block_device *bdev, gfp_t gfp_mask);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			sector_t nr_sects, gfp_t gfp_mask);
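
/* the sb_issue_* helpers convert filesystem blocks to 512-byte sectors */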
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask, flags);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask);
}

extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_DEF_MAX_SECTORS	= 4096,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline unsigned long queue_bounce_pfn(struct request_queue *q)
{
	return q->limits.bounce_pfn;
}

static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned int queue_max_sectors(struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(struct request_queue *q)
{
	if (q->limits.misaligned)
		return -1;

	return q->limits.alignment_offset;
}

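/*
 * Bytes by which the given sector is misaligned from the physical-block/
 * io_min granularity, masked to that granularity; e.g. with a 4096-byte
 * granularity and alignment_offset 0, sector 1 (byte 512) yields 3584.
 */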
static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = (sector << 9) & (granularity - 1);

	return (granularity + lim->alignment_offset - alignment)
		& (granularity - 1);
}

static inline int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->alignment_offset;

	return q->limits.alignment_offset;
}

static inline int queue_discard_alignment(struct request_queue *q)
{
	if (q->limits.discard_misaligned)
		return -1;

	return q->limits.discard_alignment;
}

static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);

	if (!lim->max_discard_sectors)
		return 0;

	return (lim->discard_granularity + lim->discard_alignment - alignment)
		& (lim->discard_granularity - 1);
}

static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
{
	if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
		return 1;

	return 0;
}

static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
{
	return queue_discard_zeroes_data(bdev_get_queue(bdev));
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !(addr & alignment) && !(len & alignment);
}

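/* log2 of the block size, never below 9 (512 bytes) */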
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

static inline bool queue_flush_queueable(struct request_queue *q)
{
	return !q->flush_not_queueable;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

struct work_struct;
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);

#ifdef CONFIG_BLK_CGROUP
static inline void set_start_time_ns(struct request *req)
{
	preempt_disable();
	req->start_time_ns = sched_clock();
	preempt_enable();
}

static inline void set_io_start_time_ns(struct request *req)
{
	preempt_disable();
	req->io_start_time_ns = sched_clock();
	preempt_enable();
}

static inline uint64_t rq_start_time_ns(struct request *req)
{
	return req->start_time_ns;
}

static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return req->io_start_time_ns;
}
#else
static inline void set_start_time_ns(struct request *req) {}
static inline void set_io_start_time_ns(struct request *req) {}
static inline uint64_t rq_start_time_ns(struct request *req)
{
	return 0;
}
static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return 0;
}
#endif

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define INTEGRITY_FLAG_READ	2
#define INTEGRITY_FLAG_WRITE	4

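/*
 * Data integrity (e.g. T10 DIF) support: the exchange descriptor handed to
 * the generate/verify callbacks of a registered blk_integrity profile.
 */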
struct blk_integrity_exchg {
	void *prot_buf;
	void *data_buf;
	sector_t sector;
	unsigned int data_size;
	unsigned short sector_size;
	const char *disk_name;
};

typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);

struct blk_integrity {
	integrity_gen_fn *generate_fn;
	integrity_vrfy_fn *verify_fn;
	integrity_set_tag_fn *set_tag_fn;
	integrity_get_tag_fn *get_tag_fn;

	unsigned short flags;
	unsigned short tuple_size;
	unsigned short sector_size;
	unsigned short tag_size;

	const char *name;

	struct kobject kobj;
};

extern bool blk_integrity_is_initialized(struct gendisk *);
extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
				   struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
extern int blk_integrity_merge_rq(struct request_queue *, struct request *,
				  struct request *);
extern int blk_integrity_merge_bio(struct request_queue *, struct request *,
				   struct bio *);

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return bdev->bd_disk->integrity;
}

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return disk->integrity;
}

static inline int blk_integrity_rq(struct request *rq)
{
	if (rq->bio == NULL)
		return 0;

	return bio_integrity(rq->bio);
}

static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
	q->limits.max_integrity_segments = segs;
}

static inline unsigned short
queue_max_integrity_segments(struct request_queue *q)
{
	return q->limits.max_integrity_segments;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

struct bio;
struct block_device;
struct gendisk;
struct blk_integrity;

static inline int blk_integrity_rq(struct request *rq)
{
	return 0;
}
static inline int blk_rq_count_integrity_sg(struct request_queue *q,
					    struct bio *b)
{
	return 0;
}
static inline int blk_rq_map_integrity_sg(struct request_queue *q,
					  struct bio *b,
					  struct scatterlist *s)
{
	return 0;
}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
	return NULL;
}
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return NULL;
}
static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
{
	return 0;
}
static inline int blk_integrity_register(struct gendisk *d,
					 struct blk_integrity *b)
{
	return 0;
}
static inline void blk_integrity_unregister(struct gendisk *d)
{
}
static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
}
static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
{
	return 0;
}
static inline int blk_integrity_merge_rq(struct request_queue *rq,
					 struct request *r1,
					 struct request *r2)
{
	return 0;
}
static inline int blk_integrity_merge_bio(struct request_queue *rq,
					  struct request *r,
					  struct bio *b)
{
	return 0;
}
static inline bool blk_integrity_is_initialized(struct gendisk *g)
{
	return false;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

struct block_device_operations {
	int (*open) (struct block_device *, fmode_t);
	int (*release) (struct gendisk *, fmode_t);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*direct_access) (struct block_device *, sector_t,
			      void **, unsigned long *);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);

	int (*media_changed) (struct gendisk *);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*revalidate_disk) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);

	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	struct module *owner;
};

extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
				 unsigned long);
#else /* CONFIG_BLOCK */

#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

struct blk_plug {
};

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct task_struct *task)
{
}

static inline void blk_schedule_flush_plug(struct task_struct *task)
{
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	return false;
}

#endif /* CONFIG_BLOCK */

#endif /* _LINUX_BLKDEV_H */