/*
 * fs/f2fs/segment.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/* constant macro */
#define NULL_SEGNO			((unsigned int)(~0))

/* L: Logical segment # in volume, R: Relative segment # in main area */
#define GET_L2R_SEGNO(free_i, segno)	(segno - free_i->start_segno)
#define GET_R2L_SEGNO(free_i, segno)	(segno + free_i->start_segno)

#define IS_DATASEG(t)							\
	((t == CURSEG_HOT_DATA) || (t == CURSEG_COLD_DATA) ||		\
	(t == CURSEG_WARM_DATA))

#define IS_NODESEG(t)							\
	((t == CURSEG_HOT_NODE) || (t == CURSEG_COLD_NODE) ||		\
	(t == CURSEG_WARM_NODE))

#define IS_CURSEG(sbi, segno)						\
	((segno == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) ||	\
	 (segno == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) ||	\
	 (segno == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) ||	\
	 (segno == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) ||	\
	 (segno == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) ||	\
	 (segno == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno))

#define IS_CURSEC(sbi, secno)						\
	((secno == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno /		\
	  sbi->segs_per_sec) ||	\
	 (secno == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno /		\
	  sbi->segs_per_sec) ||	\
	 (secno == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno /		\
	  sbi->segs_per_sec) ||	\
	 (secno == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno /		\
	  sbi->segs_per_sec) ||	\
	 (secno == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno /		\
	  sbi->segs_per_sec) ||	\
	 (secno == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno /		\
	  sbi->segs_per_sec))

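/*
 * Address translation between main-area segments and raw block addresses.
 * As an illustrative example, assuming the default 2MB segment (512 blocks
 * of 4KB, i.e. log_blocks_per_seg == 9), relative segment #3 starts at
 * block seg0_blkaddr + (GET_R2L_SEGNO(free_i, 3) << 9).
 */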
#define START_BLOCK(sbi, segno)						\
	(SM_I(sbi)->seg0_blkaddr +					\
	 (GET_R2L_SEGNO(FREE_I(sbi), segno) << sbi->log_blocks_per_seg))
#define NEXT_FREE_BLKADDR(sbi, curseg)					\
	(START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff)

#define MAIN_BASE_BLOCK(sbi)	(SM_I(sbi)->main_blkaddr)

#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr)				\
	((blk_addr) - SM_I(sbi)->seg0_blkaddr)
#define GET_SEGNO_FROM_SEG0(sbi, blk_addr)				\
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> sbi->log_blocks_per_seg)
#define GET_SEGNO(sbi, blk_addr)					\
	(((blk_addr == NULL_ADDR) || (blk_addr == NEW_ADDR)) ?		\
	NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi),			\
		GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
#define GET_SECNO(sbi, segno)					\
	((segno) / sbi->segs_per_sec)
#define GET_ZONENO_FROM_SEGNO(sbi, segno)				\
	((segno / sbi->segs_per_sec) / sbi->secs_per_zone)

#define GET_SUM_BLOCK(sbi, segno)				\
	((sbi->sm_info->ssa_blkaddr) + segno)

#define GET_SUM_TYPE(footer) ((footer)->entry_type)
#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = type)

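/*
 * The SIT entry for segment #segno lives in SIT block
 * segno / SIT_ENTRY_PER_BLOCK (55 entries per block with the default 4KB
 * block size), at in-block offset segno % sents_per_block; START_SEGNO()
 * gives the first segment covered by that SIT block.
 */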
#define SIT_ENTRY_OFFSET(sit_i, segno)					\
	(segno % sit_i->sents_per_block)
#define SIT_BLOCK_OFFSET(sit_i, segno)					\
	(segno / SIT_ENTRY_PER_BLOCK)
#define	START_SEGNO(sit_i, segno)		\
	(SIT_BLOCK_OFFSET(sit_i, segno) * SIT_ENTRY_PER_BLOCK)
#define f2fs_bitmap_size(nr)			\
	(BITS_TO_LONGS(nr) * sizeof(unsigned long))
#define TOTAL_SEGS(sbi)	(SM_I(sbi)->main_segments)

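/*
 * Convert a block address into a 512-byte sector address: with 4KB blocks
 * (log_blocksize == 12) and F2FS_LOG_SECTOR_SIZE == 9 the shift is 3,
 * i.e. eight sectors per block.
 */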
#define SECTOR_FROM_BLOCK(sbi, blk_addr)				\
	(blk_addr << ((sbi)->log_blocksize - F2FS_LOG_SECTOR_SIZE))

/* during checkpoint, bio_private is used to synchronize the last bio */
struct bio_private {
	struct f2fs_sb_info *sbi;
	bool is_sync;
	void *wait;
};

/*
 * indicate a block allocation direction: RIGHT and LEFT.
 * RIGHT means allocating new sections towards the end of the volume.
 * LEFT means the opposite direction.
 */
enum {
	ALLOC_RIGHT = 0,
	ALLOC_LEFT
};

/*
 * In the victim_sel_policy->alloc_mode, there are two block allocation modes.
 * LFS writes data sequentially with cleaning operations.
 * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations.
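 * SSR is attempted only when free sections run short; see need_SSR() below.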
 */
enum {
	LFS = 0,
	SSR
};

/*
 * In the victim_sel_policy->gc_mode, there are two gc, aka cleaning, modes.
 * GC_CB is based on the cost-benefit algorithm.
 * GC_GREEDY is based on the greedy algorithm.
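 * Roughly, cost-benefit prefers victims whose reclaimable space, weighted by
 * segment age, outweighs the cost of migrating their remaining valid blocks,
 * while greedy simply picks the victim with the fewest valid blocks.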
 */
enum {
	GC_CB = 0,
	GC_GREEDY
};

/*
 * BG_GC means the background cleaning job.
 * FG_GC means the on-demand cleaning job.
 */
enum {
	BG_GC = 0,
	FG_GC
};

/* for a function parameter to select a victim segment */
struct victim_sel_policy {
	int alloc_mode;			/* LFS or SSR */
	int gc_mode;			/* GC_CB or GC_GREEDY */
	unsigned long *dirty_segmap;	/* dirty segment bitmap */
	unsigned int offset;		/* last scanned bitmap offset */
	unsigned int ofs_unit;		/* bitmap search unit */
	unsigned int min_cost;		/* minimum cost */
	unsigned int min_segno;		/* segment # having min. cost */
};

struct seg_entry {
	unsigned short valid_blocks;	/* # of valid blocks */
	unsigned char *cur_valid_map;	/* validity bitmap of blocks */
	/*
	 * # of valid blocks and the validity bitmap stored in the last
	 * checkpoint pack. This information is used by the SSR mode.
	 */
	unsigned short ckpt_valid_blocks;
	unsigned char *ckpt_valid_map;
	unsigned char type;		/* segment type like CURSEG_XXX_TYPE */
	unsigned long long mtime;	/* modification time of the segment */
};

struct sec_entry {
	unsigned int valid_blocks;	/* # of valid blocks in a section */
};

struct segment_allocation {
	void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
};

struct sit_info {
	const struct segment_allocation *s_ops;

	block_t sit_base_addr;		/* start block address of SIT area */
	block_t sit_blocks;		/* # of blocks used by SIT area */
	block_t written_valid_blocks;	/* # of valid blocks in main area */
	char *sit_bitmap;		/* SIT bitmap pointer */
	unsigned int bitmap_size;	/* SIT bitmap size */

	unsigned long *dirty_sentries_bitmap;	/* bitmap for dirty sentries */
	unsigned int dirty_sentries;		/* # of dirty sentries */
	unsigned int sents_per_block;		/* # of SIT entries per block */
	struct mutex sentry_lock;		/* to protect SIT cache */
	struct seg_entry *sentries;		/* SIT segment-level cache */
	struct sec_entry *sec_entries;		/* SIT section-level cache */

	/* for cost-benefit algorithm in cleaning procedure */
	unsigned long long elapsed_time;	/* elapsed time after mount */
	unsigned long long mounted_time;	/* mount time */
	unsigned long long min_mtime;		/* min. modification time */
	unsigned long long max_mtime;		/* max. modification time */
};

struct free_segmap_info {
	unsigned int start_segno;	/* start segment number logically */
	unsigned int free_segments;	/* # of free segments */
	unsigned int free_sections;	/* # of free sections */
	rwlock_t segmap_lock;		/* free segmap lock */
	unsigned long *free_segmap;	/* free segment bitmap */
	unsigned long *free_secmap;	/* free section bitmap */
};

/* Notice: The order of dirty type is the same as CURSEG_XXX in f2fs.h */
enum dirty_type {
	DIRTY_HOT_DATA,		/* dirty segments assigned as hot data logs */
	DIRTY_WARM_DATA,	/* dirty segments assigned as warm data logs */
	DIRTY_COLD_DATA,	/* dirty segments assigned as cold data logs */
	DIRTY_HOT_NODE,		/* dirty segments assigned as hot node logs */
	DIRTY_WARM_NODE,	/* dirty segments assigned as warm node logs */
	DIRTY_COLD_NODE,	/* dirty segments assigned as cold node logs */
	DIRTY,			/* to count # of dirty segments */
	PRE,			/* to count # of entirely obsolete segments */
	NR_DIRTY_TYPE
};

struct dirty_seglist_info {
	const struct victim_selection *v_ops;	/* victim selection operation */
	unsigned long *dirty_segmap[NR_DIRTY_TYPE];
	struct mutex seglist_lock;		/* lock for segment bitmaps */
	int nr_dirty[NR_DIRTY_TYPE];		/* # of dirty segments */
	unsigned long *victim_segmap[2];	/* BG_GC, FG_GC */
};

/* victim selection function for cleaning and SSR */
struct victim_selection {
	int (*get_victim)(struct f2fs_sb_info *, unsigned int *,
							int, int, char);
};

/* for active log information */
struct curseg_info {
	struct mutex curseg_mutex;		/* lock for consistency */
	struct f2fs_summary_block *sum_blk;	/* cached summary block */
	unsigned char alloc_type;		/* current allocation type */
	unsigned int segno;			/* current segment number */
	unsigned short next_blkoff;		/* next block offset to write */
	unsigned int zone;			/* current zone number */
	unsigned int next_segno;		/* preallocated segment */
};

/*
 * inline functions
 */
static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
{
	return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
}

static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sentries[segno];
}

static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sec_entries[GET_SECNO(sbi, segno)];
}

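/*
 * Callers typically pass sbi->segs_per_sec as @section, so the per-section
 * counter is consulted whenever a section spans more than one segment.
 */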
static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno, int section)
{
	/*
	 * In order to get # of valid blocks in a section instantly from many
	 * segments, f2fs manages two counting structures separately.
	 */
	if (section > 1)
		return get_sec_entry(sbi, segno)->valid_blocks;
	else
		return get_seg_entry(sbi, segno)->valid_blocks;
}

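/*
 * On disk, f2fs_sit_entry.vblocks packs the segment type into its upper bits
 * (above SIT_VBLOCKS_SHIFT) and the valid block count into the lower bits;
 * GET_SIT_VBLOCKS()/GET_SIT_TYPE() unpack it, and seg_info_to_raw_sit()
 * below repacks it.
 */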
static inline void seg_info_from_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	se->valid_blocks = GET_SIT_VBLOCKS(rs);
	se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
	memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	se->type = GET_SIT_TYPE(rs);
	se->mtime = le64_to_cpu(rs->mtime);
}

static inline void seg_info_to_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) |
					se->valid_blocks;
	rs->vblocks = cpu_to_le16(raw_vblocks);
	memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	se->ckpt_valid_blocks = se->valid_blocks;
	rs->mtime = cpu_to_le64(se->mtime);
}

static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
		unsigned int max, unsigned int segno)
{
	unsigned int ret;
	read_lock(&free_i->segmap_lock);
	ret = find_next_bit(free_i->free_segmap, max, segno);
	read_unlock(&free_i->segmap_lock);
	return ret;
}

static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	unsigned int start_segno = secno * sbi->segs_per_sec;
	unsigned int next;

	write_lock(&free_i->segmap_lock);
	clear_bit(segno, free_i->free_segmap);
	free_i->free_segments++;

	next = find_next_bit(free_i->free_segmap, TOTAL_SEGS(sbi), start_segno);
	if (next >= start_segno + sbi->segs_per_sec) {
		clear_bit(secno, free_i->free_secmap);
		free_i->free_sections++;
	}
	write_unlock(&free_i->segmap_lock);
}

static inline void __set_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	set_bit(segno, free_i->free_segmap);
	free_i->free_segments--;
	if (!test_and_set_bit(secno, free_i->free_secmap))
		free_i->free_sections--;
}

static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	unsigned int start_segno = secno * sbi->segs_per_sec;
	unsigned int next;

	write_lock(&free_i->segmap_lock);
	if (test_and_clear_bit(segno, free_i->free_segmap)) {
		free_i->free_segments++;

		next = find_next_bit(free_i->free_segmap, TOTAL_SEGS(sbi),
								start_segno);
		if (next >= start_segno + sbi->segs_per_sec) {
			if (test_and_clear_bit(secno, free_i->free_secmap))
				free_i->free_sections++;
		}
	}
	write_unlock(&free_i->segmap_lock);
}

static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	write_lock(&free_i->segmap_lock);
	if (!test_and_set_bit(segno, free_i->free_segmap)) {
		free_i->free_segments--;
		if (!test_and_set_bit(secno, free_i->free_secmap))
			free_i->free_sections--;
	}
	write_unlock(&free_i->segmap_lock);
}

static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
		void *dst_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size);
}

static inline block_t written_block_count(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	block_t vblocks;

	mutex_lock(&sit_i->sentry_lock);
	vblocks = sit_i->written_valid_blocks;
	mutex_unlock(&sit_i->sentry_lock);

	return vblocks;
}

static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int free_segs;

	read_lock(&free_i->segmap_lock);
	free_segs = free_i->free_segments;
	read_unlock(&free_i->segmap_lock);

	return free_segs;
}

static inline int reserved_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->reserved_segments;
}

static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int free_secs;

	read_lock(&free_i->segmap_lock);
	free_secs = free_i->free_sections;
	read_unlock(&free_i->segmap_lock);

	return free_secs;
}

static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[PRE];
}

static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_NODE];
}

static inline int overprovision_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->ovp_segments;
}

static inline int overprovision_sections(struct f2fs_sb_info *sbi)
{
	return ((unsigned int) overprovision_segments(sbi)) / sbi->segs_per_sec;
}

static inline int reserved_sections(struct f2fs_sb_info *sbi)
{
	return ((unsigned int) reserved_segments(sbi)) / sbi->segs_per_sec;
}

static inline bool need_SSR(struct f2fs_sb_info *sbi)
{
	return (free_sections(sbi) < overprovision_sections(sbi));
}

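/*
 * Checkpointing must be able to flush every dirty node and dentry page into
 * free sections; dirty dentry sections are weighted double as a conservative
 * margin on top of the reserved sections.
 */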
static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);

	if (sbi->por_doing)
		return false;

	return ((free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs +
						reserved_sections(sbi)));
}

static inline int utilization(struct f2fs_sb_info *sbi)
{
	return (long int)valid_user_blocks(sbi) * 100 /
			(long int)sbi->user_block_count;
}

/*
 * Sometimes it is better for f2fs to drop its out-of-place update policy.
 * So, if the fs utilization is over MIN_IPU_UTIL, f2fs tries to write
 * data in place, like other traditional file systems.
 * MIN_IPU_UTIL is currently set to 100 percent, which means in-place
 * updates are disabled.
 * See need_inplace_update() below.
 */
#define MIN_IPU_UTIL		100
static inline bool need_inplace_update(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (S_ISDIR(inode->i_mode))
		return false;
	if (need_SSR(sbi) && utilization(sbi) > MIN_IPU_UTIL)
		return true;
	return false;
}

static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->segno;
}

static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->alloc_type;
}

static inline unsigned short curseg_blkoff(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->next_blkoff;
}

static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
{
	unsigned int end_segno = SM_I(sbi)->segment_count - 1;
	BUG_ON(segno > end_segno);
}

/*
 * This function is used only for debugging.
 * NOTE: In the future, we should remove this function.
 */
static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	block_t total_blks = sm_info->segment_count << sbi->log_blocks_per_seg;
	block_t start_addr = sm_info->seg0_blkaddr;
	block_t end_addr = start_addr + total_blks - 1;
	BUG_ON(blk_addr < start_addr);
	BUG_ON(blk_addr > end_addr);
}

/*
 * A summary block is always treated as an invalid block.
 */
static inline void check_block_count(struct f2fs_sb_info *sbi,
		int segno, struct f2fs_sit_entry *raw_sit)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	unsigned int end_segno = sm_info->segment_count - 1;
	int valid_blocks = 0;
	int i;

	/* check segment usage */
	BUG_ON(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg);

	/* check boundary of a given segment number */
	BUG_ON(segno > end_segno);

	/* check bitmap with valid block count */
	for (i = 0; i < sbi->blocks_per_seg; i++)
		if (f2fs_test_bit(i, raw_sit->valid_map))
			valid_blocks++;
	BUG_ON(GET_SIT_VBLOCKS(raw_sit) != valid_blocks);
}

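/*
 * The SIT area keeps two copies of each SIT block; sit_bitmap records which
 * copy currently holds valid data.  current_sit_addr() returns the valid
 * copy, next_sit_addr() the alternate one, and set_to_next_sit() flips the
 * bit once an updated SIT block has been written to the alternate copy.
 */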
static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
						unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(sit_i, start);
	block_t blk_addr = sit_i->sit_base_addr + offset;

	check_seg_range(sbi, start);

	/* calculate sit block address */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	return blk_addr;
}

static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	block_addr -= sit_i->sit_base_addr;
	if (block_addr < sit_i->sit_blocks)
		block_addr += sit_i->sit_blocks;
	else
		block_addr -= sit_i->sit_blocks;

	return block_addr + sit_i->sit_base_addr;
}

static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
{
	unsigned int block_off = SIT_BLOCK_OFFSET(sit_i, start);

	if (f2fs_test_bit(block_off, sit_i->sit_bitmap))
		f2fs_clear_bit(block_off, sit_i->sit_bitmap);
	else
		f2fs_set_bit(block_off, sit_i->sit_bitmap);
}

static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return sit_i->elapsed_time + CURRENT_TIME_SEC.tv_sec -
						sit_i->mounted_time;
}

static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
			unsigned int ofs_in_node, unsigned char version)
{
	sum->nid = cpu_to_le32(nid);
	sum->ofs_in_node = cpu_to_le16(ofs_in_node);
	sum->version = version;
}

static inline block_t start_sum_block(struct f2fs_sb_info *sbi)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
				- (base + 1) + type;
}