| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* | 
| Uwe Zeisberger | f30c226 | 2006-10-03 23:01:26 +0200 | [diff] [blame] | 2 | * include/linux/writeback.h | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3 | */ | 
|  | 4 | #ifndef WRITEBACK_H | 
|  | 5 | #define WRITEBACK_H | 
|  | 6 |  | 
| Alexey Dobriyan | e8edc6e | 2007-05-21 01:22:52 +0400 | [diff] [blame] | 7 | #include <linux/sched.h> | 
|  | 8 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9 | struct backing_dev_info; | 
|  | 10 |  | 
|  | 11 | extern spinlock_t inode_lock; | 
|  | 12 | extern struct list_head inode_in_use; | 
|  | 13 | extern struct list_head inode_unused; | 
|  | 14 |  | 
/*
 * Yes, writeback.h requires sched.h
 * No, sched.h is not included from here.
 */

/*
 * Nonzero iff @task is a pdflush daemon, i.e. its PF_FLUSHER flag is set.
 * Note this returns the raw masked flag value, not a normalised 0/1 —
 * callers must treat it as a plain boolean.
 */
static inline int task_is_pdflush(struct task_struct *task)
{
	return task->flags & PF_FLUSHER;
}

/* Is the currently-running task one of the pdflush threads? */
#define current_is_pdflush()	task_is_pdflush(current)
|  | 25 |  | 
/*
 * fs/fs-writeback.c
 */

/*
 * How much waiting a writeback pass should do; carried in
 * writeback_control.sync_mode (below).
 */
enum writeback_sync_modes {
	WB_SYNC_NONE,	/* Don't wait on anything */
	WB_SYNC_ALL,	/* Wait on every mapping */
	WB_SYNC_HOLD,	/* Hold the inode on sb_dirty for sys_sync() */
};
|  | 34 |  | 
/*
 * A control structure which tells the writeback code what to do.  These are
 * always on the stack, and hence need no locking.  They are always initialised
 * in a manner such that unspecified fields are set to zero.
 */
struct writeback_control {
	struct backing_dev_info *bdi;	/* If !NULL, only write back this
					   queue */
	enum writeback_sync_modes sync_mode;
	unsigned long *older_than_this;	/* If !NULL, only write back inodes
					   older than this */
	long nr_to_write;		/* Write this many pages, and decrement
					   this for each page written */
	long pages_skipped;		/* Pages which were not written */

	/*
	 * For a_ops->writepages(): if range_start or range_end are non-zero
	 * then this is a hint that the filesystem need only write out the
	 * pages inside that byterange.  The byte at range_end is included in
	 * the writeout request.
	 */
	loff_t range_start;
	loff_t range_end;

	unsigned nonblocking:1;		/* Don't get stuck on request queues */
	unsigned encountered_congestion:1; /* An output: a queue is full */
	unsigned for_kupdate:1;		/* A kupdate writeback */
	unsigned for_reclaim:1;		/* Invoked from the page allocator */
	unsigned for_writepages:1;	/* This is a writepages() call */
	unsigned range_cyclic:1;	/* range_start is cyclic */

	void *fs_private;		/* For use by ->writepages() */
};
|  | 67 |  | 
/*
 * fs/fs-writeback.c
 */
void writeback_inodes(struct writeback_control *wbc);
void wake_up_inode(struct inode *inode);
int inode_wait(void *);		/* wait_on_bit() action callback — used by
				   wait_on_inode() below */
void sync_inodes_sb(struct super_block *, int wait);
void sync_inodes(int wait);
/* writeback.h requires fs.h; it, too, is not included from here. */

/*
 * Sleep (uninterruptibly) on the __I_LOCK bit of inode->i_state, using the
 * inode_wait() callback declared above as the wait action.  May sleep,
 * hence the might_sleep() annotation.
 */
static inline void wait_on_inode(struct inode *inode)
{
	might_sleep();
	wait_on_bit(&inode->i_state, __I_LOCK, inode_wait,
							TASK_UNINTERRUPTIBLE);
}
|  | 84 |  | 
/*
 * mm/page-writeback.c
 */
int wakeup_pdflush(long nr_pages);
void laptop_io_completion(void);
void laptop_sync_completion(void);
void throttle_vm_writeout(gfp_t gfp_mask);

/* These are exported to sysctl. */
extern int dirty_background_ratio;
extern int vm_dirty_ratio;
extern int dirty_writeback_interval;
extern int dirty_expire_interval;
extern int block_dump;
extern int laptop_mode;

struct ctl_table;
struct file;
/* NOTE(review): presumably the handler for the vm.dirty_writeback_centisecs
 * sysctl — confirm against kernel/sysctl.c. */
int dirty_writeback_centisecs_handler(struct ctl_table *, int, struct file *,
				      void __user *, size_t *, loff_t *);

void page_writeback_init(void);
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
					unsigned long nr_pages_dirtied);
|  | 109 |  | 
/*
 * Convenience wrapper around balance_dirty_pages_ratelimited_nr() for the
 * common case: charge a single newly-dirtied page against @mapping.
 */
static inline void
balance_dirty_pages_ratelimited(struct address_space *mapping)
{
	balance_dirty_pages_ratelimited_nr(mapping, 1);
}
|  | 115 |  | 
/*
 * Per-page callback type taken by write_cache_pages(); @data matches the
 * void *data argument the caller hands to write_cache_pages().
 */
typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
				void *data);

int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0);
int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc);
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data);
int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
int sync_page_range(struct inode *inode, struct address_space *mapping,
		    loff_t pos, loff_t count);
int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
			   loff_t pos, loff_t count);
void set_page_dirty_balance(struct page *page);
void writeback_set_ratelimit(void);
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 132 |  | 
|  | 133 | /* pdflush.c */ | 
|  | 134 | extern int nr_pdflush_threads;	/* Global so it can be exported to sysctl | 
|  | 135 | read-only. */ | 
|  | 136 |  | 
|  | 137 |  | 
|  | 138 | #endif		/* WRITEBACK_H */ |