
#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

struct backing_dev_info default_backing_dev_info = {
	.name		= "default",
	.ra_pages	= VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
	.state		= 0,
	.capabilities	= BDI_CAP_MAP_COPY,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

struct backing_dev_info noop_backing_dev_info = {
	.name		= "noop",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;

/*
 * bdi_lock protects updates to bdi_list and bdi_pending_list, as well as
 * reader side protection for bdi_pending_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);
LIST_HEAD(bdi_pending_list);

static struct task_struct *sync_supers_tsk;
static struct timer_list sync_supers_timer;

static int bdi_sync_supers(void *);
static void sync_supers_timer_fn(unsigned long);

void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
{
	if (wb1 < wb2) {
		spin_lock(&wb1->list_lock);
		spin_lock_nested(&wb2->list_lock, 1);
	} else {
		spin_lock(&wb2->list_lock);
		spin_lock_nested(&wb1->list_lock, 1);
	}
}
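
/*
 * Why order by address: an illustrative sketch, not part of the API. If
 * one path did spin_lock(&a->list_lock) then spin_lock(&b->list_lock)
 * while another took the same two locks in the opposite order, the two
 * could deadlock (ABBA). Taking the lower-addressed lock first on every
 * path gives all lockers one global order. A caller (see bdi_destroy()
 * below) uses it like:
 *
 *	bdi_lock_two(&bdi->wb, dst);
 *	... move inodes between the two sets of writeback lists ...
 *	spin_unlock(&bdi->wb.list_lock);
 *	spin_unlock(&dst->list_lock);
 *
 * The spin_lock_nested(..., 1) annotation merely tells lockdep that
 * holding two locks of the same class here is intentional.
 */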

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long bdi_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io;
	struct inode *inode;

	nr_dirty = nr_io = nr_more_io = 0;
	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_wb_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
		nr_more_io++;
	spin_unlock(&wb->list_lock);

	global_dirty_limits(&background_thresh, &dirty_thresh);
	bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
	seq_printf(m,
		   "BdiWriteback:       %10lu kB\n"
		   "BdiReclaimable:     %10lu kB\n"
		   "BdiDirtyThresh:     %10lu kB\n"
		   "DirtyThresh:        %10lu kB\n"
		   "BackgroundThresh:   %10lu kB\n"
		   "BdiDirtied:         %10lu kB\n"
		   "BdiWritten:         %10lu kB\n"
		   "BdiWriteBandwidth:  %10lu kBps\n"
		   "b_dirty:            %10lu\n"
		   "b_io:               %10lu\n"
		   "b_more_io:          %10lu\n"
		   "bdi_list:           %10u\n"
		   "state:              %10lx\n",
		   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
		   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
		   K(bdi_thresh),
		   K(dirty_thresh),
		   K(background_thresh),
		   (unsigned long) K(bdi_stat(bdi, BDI_DIRTIED)),
		   (unsigned long) K(bdi_stat(bdi, BDI_WRITTEN)),
		   (unsigned long) K(bdi->write_bandwidth),
		   nr_dirty,
		   nr_io,
		   nr_more_io,
		   !list_empty(&bdi->bdi_list), bdi->state);
#undef K

	return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
	.open		= bdi_debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
					       bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove(bdi->debug_stats);
	debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

static ssize_t read_ahead_kb_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned long read_ahead_kb;
	ssize_t ret = -EINVAL;

	read_ahead_kb = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
		ret = count;
	}
	return ret;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))
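
/*
 * Worked example (assuming the common PAGE_SHIFT == 12, i.e. 4 KiB
 * pages): K(pages) == pages << 2 == pages * 4, so 256 pages are shown
 * as 1024 kB. read_ahead_kb_store() above divides by the same factor:
 * writing "512" yields ra_pages == 512 >> 2 == 128 pages.
 */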

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
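
/*
 * For reference, the line above expands to (a sketch of the
 * preprocessor output, reformatted):
 *
 *	static ssize_t read_ahead_kb_show(struct device *dev,
 *			struct device_attribute *attr, char *page)
 *	{
 *		struct backing_dev_info *bdi = dev_get_drvdata(dev);
 *
 *		return snprintf(page, PAGE_SIZE-1, "%lld\n",
 *				(long long)K(bdi->ra_pages));
 *	}
 *
 * i.e. one sysfs "show" function per attribute, paired with the *_store
 * functions written by hand.
 */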

static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned int ratio;
	ssize_t ret = -EINVAL;

	ratio = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		ret = bdi_set_min_ratio(bdi, ratio);
		if (!ret)
			ret = count;
	}
	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned int ratio;
	ssize_t ret = -EINVAL;

	ratio = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		ret = bdi_set_max_ratio(bdi, ratio);
		if (!ret)
			ret = count;
	}
	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)

static struct device_attribute bdi_dev_attrs[] = {
	__ATTR_RW(read_ahead_kb),
	__ATTR_RW(min_ratio),
	__ATTR_RW(max_ratio),
	__ATTR_NULL,
};

static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_attrs = bdi_dev_attrs;
	bdi_debug_init();
	return 0;
}
postcore_initcall(bdi_class_init);

static int __init default_bdi_init(void)
{
	int err;

	sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
	BUG_ON(IS_ERR(sync_supers_tsk));

	setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
	bdi_arm_supers_timer();

	err = bdi_init(&default_backing_dev_info);
	if (!err)
		bdi_register(&default_backing_dev_info, NULL, "default");
	err = bdi_init(&noop_backing_dev_info);

	return err;
}
subsys_initcall(default_bdi_init);

int bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	return wb_has_dirty_io(&bdi->wb);
}

/*
 * kupdated() used to do this. We cannot do it from the bdi_forker_thread()
 * or we risk deadlocking on ->s_umount. The longer term solution would be
 * to implement sync_supers_bdi() or similar and simply do it from the
 * bdi writeback thread individually.
 */
static int bdi_sync_supers(void *unused)
{
	set_user_nice(current, 0);

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();

		/*
		 * Do this periodically, like kupdated() did before.
		 */
		sync_supers();
	}

	return 0;
}

void bdi_arm_supers_timer(void)
{
	unsigned long next;

	if (!dirty_writeback_interval)
		return;

	next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
	mod_timer(&sync_supers_timer, round_jiffies_up(next));
}

static void sync_supers_timer_fn(unsigned long unused)
{
	wake_up_process(sync_supers_tsk);
	bdi_arm_supers_timer();
}

static void wakeup_timer_fn(unsigned long data)
{
	struct backing_dev_info *bdi = (struct backing_dev_info *)data;

	spin_lock_bh(&bdi->wb_lock);
	if (bdi->wb.task) {
		trace_writeback_wake_thread(bdi);
		wake_up_process(bdi->wb.task);
	} else {
		/*
		 * When bdi tasks are inactive for a long time, they are
		 * killed. In this case we have to wake up the forker thread,
		 * which should create and run the bdi thread.
		 */
		trace_writeback_wake_forker_thread(bdi);
		wake_up_process(default_backing_dev_info.wb.task);
	}
	spin_unlock_bh(&bdi->wb_lock);
}

/*
 * This function is used when the first inode for this bdi is marked dirty. It
 * wakes up the corresponding bdi thread, which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 */
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
{
	unsigned long timeout;

	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	mod_timer(&bdi->wb.wakeup_timer, jiffies + timeout);
}
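
/*
 * Illustrative caller (a sketch of the fast path in fs/fs-writeback.c's
 * __mark_inode_dirty(), abridged): once the first dirty inode lands on a
 * previously-clean bdi, the dirtier arms the delayed wakeup instead of
 * waking the flusher thread synchronously:
 *
 *	if (wakeup_bdi)
 *		bdi_wakeup_thread_delayed(bdi);
 */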

/*
 * Calculate the longest interval (jiffies) bdi threads are allowed to be
 * inactive.
 */
static unsigned long bdi_longest_inactive(void)
{
	unsigned long interval;

	interval = msecs_to_jiffies(dirty_writeback_interval * 10);
	return max(5UL * 60 * HZ, interval);
}
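
/*
 * Worked example with the default dirty_writeback_interval of 500
 * centisecs: interval == msecs_to_jiffies(5000), i.e. 5 seconds of
 * jiffies, so max(5UL * 60 * HZ, interval) is 5 minutes. A flusher
 * thread is therefore only killed after at least 5 minutes without
 * work, and later still if the writeback interval is raised past that.
 */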

/*
 * Clear pending bit and wakeup anybody waiting for flusher thread creation or
 * shutdown
 */
static void bdi_clear_pending(struct backing_dev_info *bdi)
{
	clear_bit(BDI_pending, &bdi->state);
	smp_mb__after_clear_bit();
	wake_up_bit(&bdi->state, BDI_pending);
}

static int bdi_forker_thread(void *ptr)
{
	struct bdi_writeback *me = ptr;

	current->flags |= PF_SWAPWRITE;
	set_freezable();

	/*
	 * Our parent may run at a different priority, just set us to normal
	 */
	set_user_nice(current, 0);

	for (;;) {
		struct task_struct *task = NULL;
		struct backing_dev_info *bdi;
		enum {
			NO_ACTION,   /* Nothing to do */
			FORK_THREAD, /* Fork bdi thread */
			KILL_THREAD, /* Kill inactive bdi thread */
		} action = NO_ACTION;

		/*
		 * Temporary measure, we want to make sure we don't see
		 * dirty data on the default backing_dev_info
		 */
		if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list)) {
			del_timer(&me->wakeup_timer);
			wb_do_writeback(me, 0);
		}

		spin_lock_bh(&bdi_lock);
		/*
		 * In the following loop we are going to check whether we have
		 * some work to do without any synchronization with tasks
		 * waking us up to do work for them. Set the task state here
		 * so that we don't miss wakeups after verifying conditions.
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		list_for_each_entry(bdi, &bdi_list, bdi_list) {
			bool have_dirty_io;

			if (!bdi_cap_writeback_dirty(bdi) ||
			     bdi_cap_flush_forker(bdi))
				continue;

			WARN(!test_bit(BDI_registered, &bdi->state),
			     "bdi %p/%s is not registered!\n", bdi, bdi->name);

			have_dirty_io = !list_empty(&bdi->work_list) ||
					wb_has_dirty_io(&bdi->wb);

			/*
			 * If the bdi has work to do, but the thread does not
			 * exist - create it.
			 */
			if (!bdi->wb.task && have_dirty_io) {
				/*
				 * Set the pending bit - if someone tries to
				 * unregister this bdi, it'll wait on this bit.
				 */
				set_bit(BDI_pending, &bdi->state);
				action = FORK_THREAD;
				break;
			}

			spin_lock(&bdi->wb_lock);

			/*
			 * If there is no work to do and the bdi thread was
			 * inactive long enough - kill it. The wb_lock is taken
			 * to make sure no-one adds more work to this bdi and
			 * wakes the bdi thread up.
			 */
			if (bdi->wb.task && !have_dirty_io &&
			    time_after(jiffies, bdi->wb.last_active +
						bdi_longest_inactive())) {
				task = bdi->wb.task;
				bdi->wb.task = NULL;
				spin_unlock(&bdi->wb_lock);
				set_bit(BDI_pending, &bdi->state);
				action = KILL_THREAD;
				break;
			}
			spin_unlock(&bdi->wb_lock);
		}
		spin_unlock_bh(&bdi_lock);

		/* Keep working if default bdi still has things to do */
		if (!list_empty(&me->bdi->work_list))
			__set_current_state(TASK_RUNNING);

		switch (action) {
		case FORK_THREAD:
			__set_current_state(TASK_RUNNING);
			task = kthread_create(bdi_writeback_thread, &bdi->wb,
					      "flush-%s", dev_name(bdi->dev));
			if (IS_ERR(task)) {
				/*
				 * If thread creation fails, force writeout of
				 * the bdi from the thread. Hopefully 1024 is
				 * large enough for efficient IO.
				 */
				writeback_inodes_wb(&bdi->wb, 1024,
						    WB_REASON_FORKER_THREAD);
			} else {
				/*
				 * The spinlock makes sure we do not lose
				 * wake-ups when racing with 'bdi_queue_work()'.
				 * And as soon as the bdi thread is visible, we
				 * can start it.
				 */
				spin_lock_bh(&bdi->wb_lock);
				bdi->wb.task = task;
				spin_unlock_bh(&bdi->wb_lock);
				wake_up_process(task);
			}
			bdi_clear_pending(bdi);
			break;

		case KILL_THREAD:
			__set_current_state(TASK_RUNNING);
			kthread_stop(task);
			bdi_clear_pending(bdi);
			break;

		case NO_ACTION:
			if (!wb_has_dirty_io(me) || !dirty_writeback_interval)
				/*
				 * There is no dirty data. The only thing we
				 * should now care about is checking for
				 * inactive bdi threads and killing them. Thus,
				 * let's sleep for a longer time, save energy
				 * and be friendly to battery-driven devices.
				 */
				schedule_timeout(bdi_longest_inactive());
			else
				schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
			try_to_freeze();
			break;
		}
	}

	return 0;
}

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu_expedited();
}

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...)
{
	va_list args;
	struct device *dev;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		return 0;

	va_start(args, fmt);
	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
	va_end(args);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	bdi->dev = dev;

	/*
	 * Just start the forker thread for our default backing_dev_info,
	 * and add other bdi's to the list. They will get a thread created
	 * on-demand when they need it.
	 */
	if (bdi_cap_flush_forker(bdi)) {
		struct bdi_writeback *wb = &bdi->wb;

		wb->task = kthread_run(bdi_forker_thread, wb, "bdi-%s",
						dev_name(dev));
		if (IS_ERR(wb->task))
			return PTR_ERR(wb->task);
	}

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(BDI_registered, &bdi->state);

	spin_lock_bh(&bdi_lock);
	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);

	trace_writeback_bdi_register(bdi);
	return 0;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);
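
/*
 * Illustrative caller (a sketch of block-layer usage of this era):
 * add_disk() registers a gendisk's queue bdi under its dev_t, so a disk
 * with major 8, minor 0 shows up as /sys/class/bdi/8:0/:
 *
 *	bdi_register_dev(&disk->queue->backing_dev_info, disk_devt(disk));
 */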

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
{
	if (!bdi_cap_writeback_dirty(bdi))
		return;

	/*
	 * Make sure nobody finds us on the bdi_list anymore
	 */
	bdi_remove_from_list(bdi);

	/*
	 * If setup is pending, wait for that to complete first
	 */
	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
			TASK_UNINTERRUPTIBLE);

	/*
	 * Finally, kill the kernel thread. We don't need to be RCU
	 * safe anymore, since the bdi is gone from visibility. Force
	 * unfreeze of the thread before calling kthread_stop(), otherwise
	 * it would never exit if it is currently stuck in the refrigerator.
	 */
	if (bdi->wb.task) {
		thaw_process(bdi->wb.task);
		kthread_stop(bdi->wb.task);
	}
}

/*
 * This bdi is going away now, make sure that no super_blocks point to it
 */
static void bdi_prune_sb(struct backing_dev_info *bdi)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_bdi == bdi)
			sb->s_bdi = &default_backing_dev_info;
	}
	spin_unlock(&sb_lock);
}

void bdi_unregister(struct backing_dev_info *bdi)
{
	if (bdi->dev) {
		bdi_set_min_ratio(bdi, 0);
		trace_writeback_bdi_unregister(bdi);
		bdi_prune_sb(bdi);
		del_timer_sync(&bdi->wb.wakeup_timer);

		if (!bdi_cap_flush_forker(bdi))
			bdi_wb_shutdown(bdi);
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}
}
EXPORT_SYMBOL(bdi_unregister);

static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
{
	memset(wb, 0, sizeof(*wb));

	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	spin_lock_init(&wb->list_lock);
	setup_timer(&wb->wakeup_timer, wakeup_timer_fn, (unsigned long)bdi);
}

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW		(100 << (20 - PAGE_SHIFT))
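
/*
 * Worked example: INIT_BW is expressed in pages per second. With 4 KiB
 * pages (PAGE_SHIFT == 12) it is 100 << 8 == 25600 pages/s, and
 * 25600 * 4 KiB == 100 MiB/s, matching the comment above.
 */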

int bdi_init(struct backing_dev_info *bdi)
{
	int i, err;

	bdi->dev = NULL;

	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = PROP_FRAC_BASE;
	spin_lock_init(&bdi->wb_lock);
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->work_list);

	bdi_wb_init(&bdi->wb, bdi);

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
		err = percpu_counter_init(&bdi->bdi_stat[i], 0);
		if (err)
			goto err;
	}

	bdi->dirty_exceeded = 0;

	bdi->bw_time_stamp = jiffies;
	bdi->written_stamp = 0;

	bdi->balanced_dirty_ratelimit = INIT_BW;
	bdi->dirty_ratelimit = INIT_BW;
	bdi->write_bandwidth = INIT_BW;
	bdi->avg_write_bandwidth = INIT_BW;

	err = prop_local_init_percpu(&bdi->completions);

	if (err) {
err:
		while (i--)
			percpu_counter_destroy(&bdi->bdi_stat[i]);
	}

	return err;
}
EXPORT_SYMBOL(bdi_init);

void bdi_destroy(struct backing_dev_info *bdi)
{
	int i;

	/*
	 * Splice our entries to the default_backing_dev_info, if this
	 * bdi disappears
	 */
	if (bdi_has_dirty_io(bdi)) {
		struct bdi_writeback *dst = &default_backing_dev_info.wb;

		bdi_lock_two(&bdi->wb, dst);
		list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
		list_splice(&bdi->wb.b_io, &dst->b_io);
		list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
		spin_unlock(&bdi->wb.list_lock);
		spin_unlock(&dst->list_lock);
	}

	bdi_unregister(bdi);

	/*
	 * If bdi_unregister() had already been called earlier, the
	 * wakeup_timer could still be armed because bdi_prune_sb()
	 * can race with the bdi_wakeup_thread_delayed() calls from
	 * __mark_inode_dirty().
	 */
	del_timer_sync(&bdi->wb.wakeup_timer);

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
		percpu_counter_destroy(&bdi->bdi_stat[i]);

	prop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);

/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
			   unsigned int cap)
{
	char tmp[32];
	int err;

	bdi->name = name;
	bdi->capabilities = cap;
	err = bdi_init(bdi);
	if (err)
		return err;

	sprintf(tmp, "%.28s%s", name, "-%d");
	err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
	if (err) {
		bdi_destroy(bdi);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(bdi_setup_and_register);
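
/*
 * Illustrative caller (a sketch of typical filesystem usage, e.g. from a
 * fill_super() implementation; the "sbi"/"myfs" names are just for the
 * example): each mount gets a private bdi registered as "<name>-<seq>",
 * and the error path mirrors the destroy path:
 *
 *	err = bdi_setup_and_register(&sbi->bdi, "myfs", BDI_CAP_MAP_COPY);
 *	if (err)
 *		goto out_free_sbi;
 *	sb->s_bdi = &sbi->bdi;
 */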

static wait_queue_head_t congestion_wqh[2] = {
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
	};
static atomic_t nr_bdi_congested[2];

void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	if (test_and_clear_bit(bit, &bdi->state))
		atomic_dec(&nr_bdi_congested[sync]);
	smp_mb__after_clear_bit();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);

void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	if (!test_and_set_bit(bit, &bdi->state))
		atomic_inc(&nr_bdi_congested[sync]);
}
EXPORT_SYMBOL(set_bdi_congested);
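
/*
 * Illustrative callers (a sketch of the block-layer wrappers in
 * include/linux/blkdev.h of this era): the request queue flags its bdi
 * congested as it fills up and clears the flag as requests complete:
 *
 *	static inline void blk_set_queue_congested(struct request_queue *q,
 *						   int sync)
 *	{
 *		set_bdi_congested(&q->backing_dev_info, sync);
 *	}
 *
 * with a matching blk_clear_queue_congested() calling
 * clear_bdi_congested().
 */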

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion.  If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(congestion_wait);
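
/*
 * Illustrative caller (a sketch of how memory reclaim throttles itself,
 * as in mm/vmscan.c of this era): back off for up to a tenth of a
 * second when writeback cannot keep up with the dirtying rate:
 *
 *	congestion_wait(BLK_RW_ASYNC, HZ/10);
 *
 * BLK_RW_SYNC/BLK_RW_ASYNC index the two congestion_wqh[] queues above.
 */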

/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
 * @zone: A zone to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * If a backing_dev is congested (any backing_dev) and the given @zone has
 * experienced recent congestion, this waits for up to @timeout jiffies for
 * either a BDI to exit congestion of the given @sync queue or a write to
 * complete.
 *
 * In the absence of zone congestion, this function calls cond_resched() to
 * yield the processor if necessary, but otherwise does not sleep.
 *
 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct zone *zone, int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	/*
	 * If there is no congestion, or heavy congestion is not being
	 * encountered in the current zone, yield if necessary instead
	 * of sleeping on the congestion queue
	 */
	if (atomic_read(&nr_bdi_congested[sync]) == 0 ||
			!zone_is_reclaim_congested(zone)) {
		cond_resched();

		/* In case we scheduled, work out time remaining */
		ret = timeout - (jiffies - start);
		if (ret < 0)
			ret = 0;

		goto out;
	}

	/* Sleep until uncongested or a write happens */
	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

out:
	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(wait_iff_congested);
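
/*
 * Illustrative caller (a sketch of direct-reclaim throttling, as in
 * mm/vmscan.c of this era): unlike congestion_wait(), this only sleeps
 * when the zone being reclaimed from is itself marked congested, so an
 * uncongested workload pays at most a cond_resched():
 *
 *	wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
 */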