blob: 77dbc4be4b7d3b17f31369c2b8a3c9989b9f28bc [file] [log] [blame]
#ifndef WRITEBACK_H
2#define WRITEBACK_H
3
4#include <linux/sched.h>
5#include <linux/fs.h>
6
7DECLARE_PER_CPU(int, dirty_throttle_leaks);
8
9/*
10 * The 1/4 region under the global dirty thresh is for smooth dirty throttling:
11 *
12 * (thresh - thresh/DIRTY_FULL_SCOPE, thresh)
13 *
14 * Further beyond, all dirtier tasks will enter a loop waiting (possibly long
15 * time) for the dirty pages to drop, unless written enough pages.
16 *
17 * The global dirty threshold is normally equal to the global dirty limit,
18 * except when the system suddenly allocates a lot of anonymous memory and
19 * knocks down the global dirty threshold quickly, in which case the global
20 * dirty limit will follow down slowly to prevent livelocking all dirtier tasks.
21 */
22#define DIRTY_SCOPE 8
23#define DIRTY_FULL_SCOPE (DIRTY_SCOPE / 2)
24
25struct backing_dev_info;
26
27enum writeback_sync_modes {
28 WB_SYNC_NONE,
29 WB_SYNC_ALL,
30};
31
32enum wb_reason {
33 WB_REASON_BACKGROUND,
34 WB_REASON_TRY_TO_FREE_PAGES,
35 WB_REASON_SYNC,
36 WB_REASON_PERIODIC,
37 WB_REASON_LAPTOP_TIMER,
38 WB_REASON_FREE_MORE_MEM,
39 WB_REASON_FS_FREE_SPACE,
40 WB_REASON_FORKER_THREAD,
41
42 WB_REASON_MAX,
43};
44extern const char *wb_reason_name[];
45
46struct writeback_control {
47 enum writeback_sync_modes sync_mode;
48 long nr_to_write; /* Write this many pages, and decrement
49 this for each page written */
50 long pages_skipped; /* Pages which were not written */
51
52 loff_t range_start;
53 loff_t range_end;
54
55 unsigned for_kupdate:1;
56 unsigned for_background:1;
57 unsigned tagged_writepages:1;
58 unsigned for_reclaim:1;
59 unsigned range_cyclic:1;
60};
61
62
63struct bdi_writeback;
64int inode_wait(void *);
65void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
66void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
67 enum wb_reason reason);
68int writeback_inodes_sb_if_idle(struct super_block *, enum wb_reason reason);
69int writeback_inodes_sb_nr_if_idle(struct super_block *, unsigned long nr,
70 enum wb_reason reason);
71void sync_inodes_sb(struct super_block *);
72long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
73 enum wb_reason reason);
74long wb_do_writeback(struct bdi_writeback *wb, int force_wait);
75void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
76
77static inline void wait_on_inode(struct inode *inode)
78{
79 might_sleep();
80 wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE);
81}
82static inline void inode_sync_wait(struct inode *inode)
83{
84 might_sleep();
85 wait_on_bit(&inode->i_state, __I_SYNC, inode_wait,
86 TASK_UNINTERRUPTIBLE);
87}
88
89
90#ifdef CONFIG_BLOCK
91void laptop_io_completion(struct backing_dev_info *info);
92void laptop_sync_completion(void);
93void laptop_mode_sync(struct work_struct *work);
94void laptop_mode_timer_fn(unsigned long data);
95#else
96static inline void laptop_sync_completion(void) { }
97#endif
98void throttle_vm_writeout(gfp_t gfp_mask);
99bool zone_dirty_ok(struct zone *zone);
100
101extern unsigned long global_dirty_limit;
102
103extern int dirty_background_ratio;
104extern unsigned long dirty_background_bytes;
105extern int vm_dirty_ratio;
106extern unsigned long vm_dirty_bytes;
107extern unsigned int dirty_writeback_interval;
108extern unsigned int dirty_expire_interval;
109extern int vm_highmem_is_dirtyable;
110extern int block_dump;
111extern int laptop_mode;
112
113extern int dirty_background_ratio_handler(struct ctl_table *table, int write,
114 void __user *buffer, size_t *lenp,
115 loff_t *ppos);
116extern int dirty_background_bytes_handler(struct ctl_table *table, int write,
117 void __user *buffer, size_t *lenp,
118 loff_t *ppos);
119extern int dirty_ratio_handler(struct ctl_table *table, int write,
120 void __user *buffer, size_t *lenp,
121 loff_t *ppos);
122extern int dirty_bytes_handler(struct ctl_table *table, int write,
123 void __user *buffer, size_t *lenp,
124 loff_t *ppos);
125
126struct ctl_table;
127int dirty_writeback_centisecs_handler(struct ctl_table *, int,
128 void __user *, size_t *, loff_t *);
129
130void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
131unsigned long bdi_dirty_limit(struct backing_dev_info *bdi,
132 unsigned long dirty);
133
134void __bdi_update_bandwidth(struct backing_dev_info *bdi,
135 unsigned long thresh,
136 unsigned long bg_thresh,
137 unsigned long dirty,
138 unsigned long bdi_thresh,
139 unsigned long bdi_dirty,
140 unsigned long start_time);
141
142void page_writeback_init(void);
143void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
144 unsigned long nr_pages_dirtied);
145
146static inline void
147balance_dirty_pages_ratelimited(struct address_space *mapping)
148{
149 balance_dirty_pages_ratelimited_nr(mapping, 1);
150}
151
152typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
153 void *data);
154
155int generic_writepages(struct address_space *mapping,
156 struct writeback_control *wbc);
157void tag_pages_for_writeback(struct address_space *mapping,
158 pgoff_t start, pgoff_t end);
159int write_cache_pages(struct address_space *mapping,
160 struct writeback_control *wbc, writepage_t writepage,
161 void *data);
162int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
163void set_page_dirty_balance(struct page *page, int page_mkwrite);
164void writeback_set_ratelimit(void);
165void tag_pages_for_writeback(struct address_space *mapping,
166 pgoff_t start, pgoff_t end);
167
168void account_page_redirty(struct page *page);
169
170extern int nr_pdflush_threads;
171
172
173#endif