/*
 *  fs/ext4/mballoc.h
 *
 *  Written by: Alex Tomas <alex@clusterfs.com>
 *
 */
#ifndef _EXT4_MBALLOC_H
#define _EXT4_MBALLOC_H

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/version.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "group.h"

/*
 * With AGGRESSIVE_CHECK defined, the allocator runs consistency checks
 * over its structures.  These checks slow things down a lot.
 */
#define AGGRESSIVE_CHECK__

/*
 * With DOUBLE_CHECK defined, mballoc creates persistent in-core
 * bitmaps and maintains and uses them to check for double allocations.
 */
#define DOUBLE_CHECK__

/*
 * With MB_DEBUG defined, mb_debug() prints debugging messages via
 * printk(); otherwise it compiles away to nothing.
 */
#define MB_DEBUG__
#ifdef MB_DEBUG
#define mb_debug(fmt, a...)	printk(fmt, ##a)
#else
#define mb_debug(fmt, a...)
#endif

/*
 * With EXT4_MB_HISTORY mballoc stores the last N allocations in memory
 * and you can monitor it in /proc/fs/ext4/<dev>/mb_history
 */
#define EXT4_MB_HISTORY
#define EXT4_MB_HISTORY_ALLOC		1	/* allocation */
#define EXT4_MB_HISTORY_PREALLOC	2	/* preallocated blocks used */
#define EXT4_MB_HISTORY_DISCARD		4	/* preallocation discarded */
#define EXT4_MB_HISTORY_FREE		8	/* free */

#define EXT4_MB_HISTORY_DEFAULT		(EXT4_MB_HISTORY_ALLOC | \
					 EXT4_MB_HISTORY_PREALLOC)

/*
 * Maximum number of found extents mballoc will examine while looking
 * for the best extent
 */
#define MB_DEFAULT_MAX_TO_SCAN		200

/*
 * Minimum number of found extents mballoc must examine before it may
 * stop looking for the best extent
 */
#define MB_DEFAULT_MIN_TO_SCAN		10

/*
 * How many groups mballoc will scan looking for the best chunk
 */
#define MB_DEFAULT_MAX_GROUPS_TO_SCAN	5

/*
 * With 'ext4_mb_stats' the allocator collects statistics that are
 * shown at umount time.  Collecting them has a cost, though!
 */
#define MB_DEFAULT_STATS		1

/*
 * Files smaller than MB_DEFAULT_STREAM_THRESHOLD are served by the
 * stream allocator, whose purpose is to pack requests as close to
 * each other as possible in order to produce smooth I/O traffic.
 * We use locality group preallocation space for stream requests.
 * This can be tuned via /proc/fs/ext4/<partition>/stream_req
 */
#define MB_DEFAULT_STREAM_THRESHOLD	16	/* 64K */
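
/*
 * Rough sketch of how the threshold is expected to be used by the
 * allocator (the check lives in ext4_mb_group_or_file() in mballoc.c;
 * "size" below stands for the normalized request/file size and is a
 * placeholder, not a field defined in this header):
 *
 *	if (size < sbi->s_mb_stream_request)
 *		ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
 *
 * i.e. small requests are steered to the locality group preallocation.
 */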

/*
 * Requests of at least this order use the 2^N (buddy) search
 */
#define MB_DEFAULT_ORDER2_REQS		2

/*
 * default group prealloc size 512 blocks
 */
#define MB_DEFAULT_GROUP_PREALLOC	512

static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;

#ifdef EXT4_BB_MAX_BLOCKS
#undef EXT4_BB_MAX_BLOCKS
#endif
#define EXT4_BB_MAX_BLOCKS	30

struct ext4_free_metadata {
	ext4_group_t group;
	unsigned short num;
	ext4_grpblk_t  blocks[EXT4_BB_MAX_BLOCKS];
	struct list_head list;
};

struct ext4_group_info {
	unsigned long	bb_state;
	unsigned long	bb_tid;
	struct ext4_free_metadata *bb_md_cur;
	unsigned short	bb_first_free;
	unsigned short	bb_free;
	unsigned short	bb_fragments;
	struct		list_head bb_prealloc_list;
#ifdef DOUBLE_CHECK
	void		*bb_bitmap;
#endif
	unsigned short	bb_counters[];
};

#define EXT4_GROUP_INFO_NEED_INIT_BIT	0
#define EXT4_GROUP_INFO_LOCKED_BIT	1

#define EXT4_MB_GRP_NEED_INIT(grp)	\
	(test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))


struct ext4_prealloc_space {
	struct list_head	pa_inode_list;
	struct list_head	pa_group_list;
	union {
		struct list_head pa_tmp_list;
		struct rcu_head	pa_rcu;
	} u;
	spinlock_t		pa_lock;
	atomic_t		pa_count;
	unsigned		pa_deleted;
	ext4_fsblk_t		pa_pstart;	/* phys. block */
	ext4_lblk_t		pa_lstart;	/* log. block */
	unsigned short		pa_len;		/* len of preallocated chunk */
	unsigned short		pa_free;	/* how many blocks are free */
	unsigned short		pa_linear;	/* consumed in one direction
						 * strictly, for grp prealloc */
	spinlock_t		*pa_obj_lock;
	struct inode		*pa_inode;	/* hack, for history only */
};
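/*
 * How the fields above relate for an inode preallocation (a sketch of
 * the intended layout, not a definition): the logical range
 * [pa_lstart, pa_lstart + pa_len) is mapped 1:1 onto the physical
 * range starting at pa_pstart, so a logical block "lblk" inside that
 * window lands at
 *
 *	pa_pstart + (lblk - pa_lstart)
 *
 * and pa_free counts how many blocks of the window are still unused.
 */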


struct ext4_free_extent {
	ext4_lblk_t fe_logical;
	ext4_grpblk_t fe_start;
	ext4_group_t fe_group;
	int fe_len;
};

/*
 * Locality group:
 *   we try to group all related changes together
 *   so that writeback can flush/allocate them together as well.
 *   Size of lg_prealloc_list hash is determined by MB_DEFAULT_GROUP_PREALLOC
 *   (512). We store prealloc space into the hash based on the pa_free blocks
 *   order value, i.e. fls(pa_free) - 1.
 */
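/*
 * Worked example of the hashing rule above (illustrative numbers): a
 * preallocation with pa_free == 48 gives fls(48) == 6, so it is kept
 * on lg_prealloc_list[5]; with MB_DEFAULT_GROUP_PREALLOC == 512 the
 * largest order is fls(512) - 1 == 9, which is what PREALLOC_TB_SIZE
 * below accommodates.
 */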
#define PREALLOC_TB_SIZE 10
struct ext4_locality_group {
	/* for allocator */
	/* to serialize allocates */
	struct mutex		lg_mutex;
	/* list of preallocations */
	struct list_head	lg_prealloc_list[PREALLOC_TB_SIZE];
	spinlock_t		lg_prealloc_lock;
};

struct ext4_allocation_context {
	struct inode *ac_inode;
	struct super_block *ac_sb;

	/* original request */
	struct ext4_free_extent ac_o_ex;

	/* goal request (after normalization) */
	struct ext4_free_extent ac_g_ex;

	/* the best found extent */
	struct ext4_free_extent ac_b_ex;

	/* copy of the best found extent taken before preallocation efforts */
	struct ext4_free_extent ac_f_ex;

	/* number of iterations done. We have to track it to limit searching */
	unsigned long ac_ex_scanned;
	__u16 ac_groups_scanned;
	__u16 ac_found;
	__u16 ac_tail;
	__u16 ac_buddy;
	__u16 ac_flags;		/* allocation hints */
	__u8 ac_status;
	__u8 ac_criteria;
	__u8 ac_repeats;
	__u8 ac_2order;		/* if request is to allocate 2^N blocks and
				 * N > 0, the field stores N, otherwise 0 */
	__u8 ac_op;		/* operation, for history only */
	struct page *ac_bitmap_page;
	struct page *ac_buddy_page;
	struct ext4_prealloc_space *ac_pa;
	struct ext4_locality_group *ac_lg;
};

#define AC_STATUS_CONTINUE	1
#define AC_STATUS_FOUND		2
#define AC_STATUS_BREAK		3
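
/*
 * A rough reading of the status codes above (an interpretation, not a
 * verbatim description from mballoc.c): CONTINUE - keep scanning for a
 * better extent, FOUND - a usable extent has been selected, BREAK -
 * stop scanning (e.g. a scan limit was hit) and take the best so far.
 */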

struct ext4_mb_history {
	struct ext4_free_extent orig;	/* orig allocation */
	struct ext4_free_extent goal;	/* goal allocation */
	struct ext4_free_extent result;	/* result allocation */
	unsigned pid;
	unsigned ino;
	__u16 found;	/* how many extents have been found */
	__u16 groups;	/* how many groups have been scanned */
	__u16 tail;	/* what tail broke some buddy */
	__u16 buddy;	/* buddy the tail ^^^ broke */
	__u16 flags;
	__u8 cr:3;	/* which phase the result extent was found at */
	__u8 op:4;
	__u8 merged:1;
};

struct ext4_buddy {
	struct page *bd_buddy_page;
	void *bd_buddy;
	struct page *bd_bitmap_page;
	void *bd_bitmap;
	struct ext4_group_info *bd_info;
	struct super_block *bd_sb;
	__u16 bd_blkbits;
	ext4_group_t bd_group;
};
#define EXT4_MB_BITMAP(e4b)	((e4b)->bd_bitmap)
#define EXT4_MB_BUDDY(e4b)	((e4b)->bd_buddy)

#ifndef EXT4_MB_HISTORY
static inline void ext4_mb_store_history(struct ext4_allocation_context *ac)
{
	return;
}
#else
static void ext4_mb_store_history(struct ext4_allocation_context *ac);
#endif

#define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
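
/*
 * Note that in_range() is inclusive at both ends, e.g. with
 * first == 100 and len == 8 it accepts blocks 100..107:
 *
 *	in_range(107, 100, 8)	evaluates to true
 *	in_range(108, 100, 8)	evaluates to false
 */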

struct buffer_head *read_block_bitmap(struct super_block *, ext4_group_t);

static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group);
static void ext4_mb_poll_new_transaction(struct super_block *, handle_t *);
static void ext4_mb_free_committed_blocks(struct super_block *);
static void ext4_mb_return_to_preallocation(struct inode *inode,
					struct ext4_buddy *e4b, sector_t block,
					int count);
static void ext4_mb_put_pa(struct ext4_allocation_context *,
			struct super_block *, struct ext4_prealloc_space *pa);
static int ext4_mb_init_per_dev_proc(struct super_block *sb);
static int ext4_mb_destroy_per_dev_proc(struct super_block *sb);


static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
{
	struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);

	bit_spin_lock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
}

static inline void ext4_unlock_group(struct super_block *sb,
					ext4_group_t group)
{
	struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);

	bit_spin_unlock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
}

static inline int ext4_is_group_locked(struct super_block *sb,
					ext4_group_t group)
{
	struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);

	return bit_spin_is_locked(EXT4_GROUP_INFO_LOCKED_BIT,
						&(grinfo->bb_state));
}
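
/*
 * Minimal usage sketch for the helpers above (the update step shown is
 * a placeholder, not a particular function): per-group data such as
 * the block bitmap, buddy data and bb_free is meant to be modified
 * only under the group lock:
 *
 *	ext4_lock_group(sb, group);
 *	... update bitmap / group counters ...
 *	ext4_unlock_group(sb, group);
 *
 * ext4_is_group_locked() lets callers assert that the lock is held.
 */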

static ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
					struct ext4_free_extent *fex)
{
	ext4_fsblk_t block;

	block = (ext4_fsblk_t) fex->fe_group * EXT4_BLOCKS_PER_GROUP(sb)
			+ fex->fe_start
			+ le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
	return block;
}
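
/*
 * Worked example for ext4_grp_offs_to_block() (illustrative numbers):
 * with 32768 blocks per group, s_first_data_block == 0 (4K blocks) and
 * a free extent at fe_group == 3, fe_start == 100, the absolute block
 * number is 3 * 32768 + 100 + 0 == 98404.
 */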
#endif