/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
 */


/*
 * mballoc.c contains the multiblock allocation routines
 */

#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <trace/events/ext4.h>

/*
 * MUSTDO:
 *   - test ext4_ext_search_left() and ext4_ext_search_right()
 *   - search for metadata in a few groups
 *
 * TODO v4:
 *   - normalization should take into account whether file is still open
 *   - discard preallocations if no free space left (policy?)
 *   - don't normalize tails
 *   - quota
 *   - reservation for superuser
 *
 * TODO v3:
 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *   - track min/max extents in each group for better group selection
 *   - mb_mark_used() may allocate chunk right after splitting buddy
 *   - tree of groups sorted by number of free blocks
 *   - error handling
 */

/*
 * An allocation request asks for multiple blocks near the specified
 * goal block.
 *
 * During the initialization phase of the allocator we decide to use
 * group preallocation or inode preallocation depending on the size of
 * the file. The size of the file could be the resulting file size we
 * would have after allocation, or the current file size, whichever
 * is larger. If the size is less than sbi->s_mb_stream_request we
 * select group preallocation. The default value of
 * s_mb_stream_request is 16 blocks. This can also be tuned via
 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
 * terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is
 * to keep small files closer together on the disk.
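 *
 * Editor's sketch of that decision (illustrative pseudocode only,
 * restating the paragraph above, not the literal mballoc code):
 *
 *	size = max(file size after this allocation, current i_size),
 *	       expressed in blocks;
 *	if (size < sbi->s_mb_stream_request)
 *		use the per-CPU locality group preallocation;
 *	else
 *		use the per-inode preallocation list;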
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
 * spaces for this particular inode. The inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * The inode preallocation space is used looking at the _logical_ start
 * block. Only if the logical file block falls within the range of a
 * prealloc space do we consume that prealloc space. This makes sure that
 * we have contiguous physical blocks representing the file blocks.
 *
 * The important thing to be noted in case of inode prealloc space is that
 * we don't modify the values associated with the inode prealloc space
 * except pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if we
 * have the group allocation flag set then we look at the locality group
 * prealloc space. This is a per-CPU prealloc list, represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-CPU locality group is to reduce the
 * contention between CPUs. It is possible to get scheduled at this point.
 *
 * The locality group prealloc space is used looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc and/or locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 * mapped to the buddy and bitmap information regarding different
 * groups. The buddy information is attached to the buddy cache inode so
 * that we can access it through the page cache. The information regarding
 * each group is loaded via ext4_mb_load_buddy and comprises the block
 * bitmap and the buddy information, stored in the inode as:
 *
 *  {                        page                        }
 *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information. So for each group we
 * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE /
 * blocksize) blocks. So it can have information regarding groups_per_page
 * groups, which is blocks_per_page/2.
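 *
 * Editor's worked example (assuming 4K PAGE_CACHE_SIZE): with a 1K
 * blocksize, blocks_per_page = 4096/1024 = 4 and groups_per_page =
 * 4/2 = 2, so page 0 holds the bitmap and buddy blocks of groups 0
 * and 1. With a 4K blocksize a page holds a single block, so each
 * group's bitmap and buddy land on two consecutive pages.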
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for the requested number of blocks in the buddy cache. If we
 * are able to locate that many free blocks we return with additional
 * information regarding the rest of the contiguous physical blocks
 * available.
 *
 * Before allocating blocks via the buddy cache we normalize the request.
 * This ensures we ask for more blocks than we need. The extra blocks
 * that we get after allocation are added to the respective prealloc
 * list. In case of inode preallocation we follow a list of heuristics
 * based on file size. This can be found in ext4_mb_normalize_request. If
 * we are doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
 * dependent on the cluster size; for non-bigalloc file systems, it is
 * 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with -O
 * stripe=<value> option the group prealloc request is normalized to
 * the smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 *
 * The regular allocator uses the buddy scan only if the request len is a
 * power of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs.
 * The value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req. If the request len is equal to
 * the stripe size (sbi->s_stripe), we search for contiguous blocks of the
 * stripe size. This should result in better allocation on RAID setups. If
 * not, we search in the specific group using the bitmap for best extents.
 * The tunables min_to_scan and max_to_scan control the behaviour here.
 * min_to_scan indicates how long mballoc __must__ look for a best
 * extent and max_to_scan indicates how long mballoc __can__ look for a
 * best extent among the found extents. Searching for the blocks starts
 * with the group specified as the goal value in the allocation context
 * via ac_g_ex. Each group is first checked based on the criteria whether
 * it can be used for allocation. ext4_mb_good_group explains how the
 * groups are checked.
 *
 * Both prealloc spaces are populated as above. So for the first
 * request we will hit the buddy cache, which will result in this prealloc
 * space getting filled. The prealloc space is then later used for
 * subsequent requests.
 */

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to a specific inode and can be used for this inode only.
 *    it describes part of the inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks the number of blocks left
 *    unused. so, before taking some block from the descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within the descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to a specific locality group which does not translate to
 *    a permanent set of inodes: an inode can join and leave a group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * relation between them can be expressed as:
 * in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in the mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count numbers
 * of blocks: how many blocks are marked used/free in the on-disk bitmap,
 * buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA:			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:		on-disk += N; PA -= N
 *  - discard locality group PA:	buddy -= PA; PA = 0
 * note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 *       is used in the real operation because we can't know the actual
 *       used bits from the PA, only from the on-disk bitmap
 *
 * if we follow this strict logic, then all operations above should be atomic.
 * given some of them can block, we'd have to use something like semaphores,
 * killing performance on high-end SMP hardware. let's try to relax it using
 * the following knowledge:
 *  1) if the buddy is referenced, it's already initialized
 *  2) while a block is used in the buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if the on-disk
 *     bitmap has a bit set and a PA claims the same block, it's OK. IOW,
 *     one can set a bit in the on-disk bitmap if the buddy has the same
 *     bit set and/or a PA covers the corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for the PA are allocated in the buddy, the buddy must be
 *      referenced until the PA is linked to the allocation group to
 *      avoid concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either the on-disk bitmap or the PA has
 *      uptodate data; given (3) we care that the PA-=N operation doesn't
 *      interfere with init
 *    - discard inode PA
 *      the simplest way would be to have the buddy initialized by the
 *      discard
 *    - use locality group PA
 *      again PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have the buddy initialized by the
 *      discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *  - use inode PA
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *
 * now we're ready to draw a few conclusions:
 *  - while a PA is referenced, no discard of it is possible
 *  - a PA is referenced until its blocks are marked in the on-disk bitmap
 *  - a PA changes only after the on-disk bitmap does
 *  - discard must not compete with init. either init is done before
 *    any discard, or they're serialized somehow
 *  - buddy init as the sum of the on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've used a PA to emptiness: no need to modify
 * the buddy in this case, but we should care about concurrent init
 *
 */
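
/*
 * Editor's illustration with made-up numbers: a new PA of 16 blocks
 * makes the buddy count all 16 as used (buddy += 16, PA = 16). Each
 * allocation from it moves blocks to the on-disk bitmap (on-disk += N,
 * PA -= N). When the PA is finally discarded with, say, 6 blocks still
 * unused, those 6 return to the buddy as free; the discard walks the
 * on-disk bitmap rather than the PA, since only the bitmap knows which
 * bits were actually used.
 */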

/*
 * Logic in a few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group	(group)
 *  - object (inode/locality)	(object)
 *  - per-pa lock		(pa)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *    pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *    pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *    pa
 *    group
 *    object
 *
 */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_data_cachep;

/* We create slab caches for groupinfo data structures based on the
 * superblock block size.  There will be one per mounted filesystem for
 * each unique s_blocksize_bits */
#define NR_GRPINFO_CACHES 8
static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];

static const char *ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
};

static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group);
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
						ext4_group_t group);
static void ext4_free_data_callback(struct super_block *sb,
				struct ext4_journal_cb_entry *jce, int rc);

static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
	*bit += ((unsigned long) addr & 7UL) << 3;
	addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
	*bit += ((unsigned long) addr & 3UL) << 3;
	addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
#endif
	return addr;
}

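/*
 * Editor's example (64-bit, hypothetical address): for addr = 0x1003
 * and *bit = 5, addr & 7UL = 3, so *bit becomes 5 + (3 << 3) = 29 and
 * addr is rounded down to 0x1000; bit 29 of the aligned word names the
 * same bit of memory as bit 5 of the byte at 0x1003 under little-endian
 * bit numbering.
 */
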
static inline int mb_test_bit(int bit, void *addr)
{
	/*
	 * ext4_test_bit on architectures like powerpc
	 * needs an unsigned long aligned address
	 */
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_bit(bit, addr);
}

static inline void mb_set_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_set_bit(bit, addr);
}

static inline void mb_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_clear_bit(bit, addr);
}

static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static inline int mb_find_next_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
{
	char *bb;

	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
	BUG_ON(max == NULL);

	if (order > e4b->bd_blkbits + 1) {
		*max = 0;
		return NULL;
	}

	/* at order 0 we see each particular block */
	if (order == 0) {
		*max = 1 << (e4b->bd_blkbits + 3);
		return e4b->bd_bitmap;
	}

	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];

	return bb;
}

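/*
 * Editor's note: e.g. with a 1K blocksize (bd_blkbits = 10), order 0
 * returns the block bitmap itself with *max = 1 << 13 = 8192 bits,
 * while each higher order n returns the buddy bitmap of 2^n-block
 * chunks, found s_mb_offsets[n] bytes into bd_buddy and s_mb_maxs[n]
 * bits long (both arrays are assumed precomputed at mount time).
 */
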
#ifdef DOUBLE_CHECK
static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
			   int first, int count)
{
	int i;
	struct super_block *sb = e4b->bd_sb;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
			ext4_fsblk_t blocknr;

			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
			blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
			ext4_grp_locked_error(sb, e4b->bd_group,
					      inode ? inode->i_ino : 0,
					      blocknr,
					      "freeing block already freed "
					      "(bit %u)",
					      first + i);
		}
		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
{
	int i;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
		unsigned char *b1, *b2;
		int i;
		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
		b2 = (unsigned char *) bitmap;
		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
			if (b1[i] != b2[i]) {
				ext4_msg(e4b->bd_sb, KERN_ERR,
					 "corruption in group %u "
					 "at byte %u(%u): %x in copy != %x "
					 "on disk/prealloc",
					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
				BUG();
			}
		}
	}
}

#else
static inline void mb_free_blocks_double(struct inode *inode,
				struct ext4_buddy *e4b, int first, int count)
{
	return;
}
static inline void mb_mark_used_double(struct ext4_buddy *e4b,
						int first, int count)
{
	return;
}
static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	return;
}
#endif

#ifdef AGGRESSIVE_CHECK

#define MB_CHECK_ASSERT(assert)						\
do {									\
	if (!(assert)) {						\
		printk(KERN_EMERG					\
			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
			function, file, line, # assert);		\
		BUG();							\
	}								\
} while (0)

static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
				const char *function, int line)
{
	struct super_block *sb = e4b->bd_sb;
	int order = e4b->bd_blkbits + 1;
	int max;
	int max2;
	int i;
	int j;
	int k;
	int count;
	struct ext4_group_info *grp;
	int fragments = 0;
	int fstart;
	struct list_head *cur;
	void *buddy;
	void *buddy2;

	{
		static int mb_check_counter;
		if (mb_check_counter++ % 100 != 0)
			return 0;
	}

	while (order > 1) {
		buddy = mb_find_buddy(e4b, order, &max);
		MB_CHECK_ASSERT(buddy);
		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
		MB_CHECK_ASSERT(buddy2);
		MB_CHECK_ASSERT(buddy != buddy2);
		MB_CHECK_ASSERT(max * 2 == max2);

		count = 0;
		for (i = 0; i < max; i++) {

			if (mb_test_bit(i, buddy)) {
				/* only single bit in buddy2 may be 1 */
				if (!mb_test_bit(i << 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit((i<<1)+1, buddy2));
				} else if (!mb_test_bit((i << 1) + 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit(i << 1, buddy2));
				}
				continue;
			}

			/* both bits in buddy2 must be 1 */
			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));

			for (j = 0; j < (1 << order); j++) {
				k = (i * (1 << order)) + j;
				MB_CHECK_ASSERT(
					!mb_test_bit(k, e4b->bd_bitmap));
			}
			count++;
		}
		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
		order--;
	}

	fstart = -1;
	buddy = mb_find_buddy(e4b, 0, &max);
	for (i = 0; i < max; i++) {
		if (!mb_test_bit(i, buddy)) {
			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
			if (fstart == -1) {
				fragments++;
				fstart = i;
			}
			continue;
		}
		fstart = -1;
		/* check used bits only */
		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
			buddy2 = mb_find_buddy(e4b, j, &max2);
			k = i >> j;
			MB_CHECK_ASSERT(k < max2);
			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
		}
	}
	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);

	grp = ext4_get_group_info(sb, e4b->bd_group);
	list_for_each(cur, &grp->bb_prealloc_list) {
		ext4_group_t groupnr;
		struct ext4_prealloc_space *pa;
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
		for (i = 0; i < pa->pa_len; i++)
			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
	}
	return 0;
}
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
					__FILE__, __func__, __LINE__)
#else
#define mb_check_buddy(e4b)
#endif

/*
 * Divide the blocks starting at @first with length @len into
 * smaller chunks with power-of-2 block counts.
 * Clear the bits in the buddy bitmap that the chunk(s) cover,
 * then increase bb_counters[] for the corresponding chunk size.
 */
static void ext4_mb_mark_free_simple(struct super_block *sb,
				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
					struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t min;
	ext4_grpblk_t max;
	ext4_grpblk_t chunk;
	unsigned short border;

	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));

	border = 2 << sb->s_blocksize_bits;

	while (len > 0) {
		/* find how many blocks can be covered since this position */
		max = ffs(first | border) - 1;

		/* find how many blocks of power 2 we need to mark */
		min = fls(len) - 1;

		if (max < min)
			min = max;
		chunk = 1 << min;

		/* mark multiblock chunks only */
		grp->bb_counters[min]++;
		if (min > 0)
			mb_clear_bit(first >> min,
				     buddy + sbi->s_mb_offsets[min]);

		len -= chunk;
		first += chunk;
	}
}

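/*
 * Editor's worked example: first = 6, len = 5 (blocks 6-10; the border
 * term doesn't change these small values).  Round 1: max = ffs(6)-1 = 1,
 * min = fls(5)-1 = 2, so min is capped at 1 and chunk = 2: blocks 6-7
 * become one order-1 chunk (bb_counters[1]++, bit 6>>1 = 3 cleared in
 * the order-1 buddy).  Round 2: blocks 8-9 likewise form an order-1
 * chunk.  Round 3: block 10 is counted in bb_counters[0]; order-0
 * chunks live in the plain bitmap, so no buddy bit is cleared.
 */
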
/*
 * Cache the order of the largest free extent we have available in this block
 * group.
 */
static void
mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
{
	int i;
	int bits;

	grp->bb_largest_free_order = -1; /* uninit */

	bits = sb->s_blocksize_bits + 1;
	for (i = bits; i >= 0; i--) {
		if (grp->bb_counters[i] > 0) {
			grp->bb_largest_free_order = i;
			break;
		}
	}
}

static noinline_for_stack
void ext4_mb_generate_buddy(struct super_block *sb,
				void *buddy, void *bitmap, ext4_group_t group)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_grpblk_t i = 0;
	ext4_grpblk_t first;
	ext4_grpblk_t len;
	unsigned free = 0;
	unsigned fragments = 0;
	unsigned long long period = get_cycles();

	/* initialize buddy from bitmap which is aggregation
	 * of on-disk bitmap and preallocations */
	i = mb_find_next_zero_bit(bitmap, max, 0);
	grp->bb_first_free = i;
	while (i < max) {
		fragments++;
		first = i;
		i = mb_find_next_bit(bitmap, max, i);
		len = i - first;
		free += len;
		if (len > 1)
			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
		else
			grp->bb_counters[0]++;
		if (i < max)
			i = mb_find_next_zero_bit(bitmap, max, i);
	}
	grp->bb_fragments = fragments;

	if (free != grp->bb_free) {
		ext4_grp_locked_error(sb, group, 0, 0,
				      "%u clusters in bitmap, %u in gd",
				      free, grp->bb_free);
		/*
		 * If we intend to continue, we consider the group descriptor
		 * corrupt and update bb_free using the bitmap value
		 */
		grp->bb_free = free;
	}
	mb_set_largest_free_order(sb, grp);

	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));

	period = get_cycles() - period;
	spin_lock(&EXT4_SB(sb)->s_bal_lock);
	EXT4_SB(sb)->s_mb_buddies_generated++;
	EXT4_SB(sb)->s_mb_generation_time += period;
	spin_unlock(&EXT4_SB(sb)->s_bal_lock);
}

/* The buddy information is attached to the buddy cache inode
 * for convenience. The information regarding each group
 * is loaded via ext4_mb_load_buddy and comprises the
 * block bitmap and the buddy information, stored in
 * the inode as
 *
 * {                        page                        }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information.
 * So for each group we take up 2 blocks. A page can
 * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks.
 * So it can have information regarding groups_per_page groups,
 * which is blocks_per_page/2.
 *
 * Locking note: This routine takes the block group lock of all groups
 * for this page; do not hold this lock when calling this routine!
 */

static int ext4_mb_init_cache(struct page *page, char *incore)
{
	ext4_group_t ngroups;
	int blocksize;
	int blocks_per_page;
	int groups_per_page;
	int err = 0;
	int i;
	ext4_group_t first_group, group;
	int first_block;
	struct super_block *sb;
	struct buffer_head *bhs;
	struct buffer_head **bh = NULL;
	struct inode *inode;
	char *data;
	char *bitmap;
	struct ext4_group_info *grinfo;

	mb_debug(1, "init page %lu\n", page->index);

	inode = page->mapping->host;
	sb = inode->i_sb;
	ngroups = ext4_get_groups_count(sb);
	blocksize = 1 << inode->i_blkbits;
	blocks_per_page = PAGE_CACHE_SIZE / blocksize;

	groups_per_page = blocks_per_page >> 1;
	if (groups_per_page == 0)
		groups_per_page = 1;

	/* allocate buffer_heads to read bitmaps */
	if (groups_per_page > 1) {
		i = sizeof(struct buffer_head *) * groups_per_page;
		bh = kzalloc(i, GFP_NOFS);
		if (bh == NULL) {
			err = -ENOMEM;
			goto out;
		}
	} else
		bh = &bhs;

	first_group = page->index * blocks_per_page / 2;

	/* read all groups the page covers into the cache */
	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
		if (group >= ngroups)
			break;

		grinfo = ext4_get_group_info(sb, group);
		/*
		 * If page is uptodate then we came here after online resize
		 * which added some new uninitialized group info structs, so
		 * we must skip all initialized uptodate buddies on the page,
		 * which may be currently in use by an allocating task.
		 */
		if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
			bh[i] = NULL;
			continue;
		}
		if (!(bh[i] = ext4_read_block_bitmap_nowait(sb, group))) {
			err = -ENOMEM;
			goto out;
		}
		mb_debug(1, "read bitmap for group %u\n", group);
	}

	/* wait for I/O completion */
	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
		if (bh[i] && ext4_wait_block_bitmap(sb, group, bh[i])) {
			err = -EIO;
			goto out;
		}
	}

	first_block = page->index * blocks_per_page;
	for (i = 0; i < blocks_per_page; i++) {
		int group;

		group = (first_block + i) >> 1;
		if (group >= ngroups)
			break;

		if (!bh[group - first_group])
			/* skip initialized uptodate buddy */
			continue;

		/*
		 * data carries information regarding this
		 * particular group in the format specified
		 * above
		 *
		 */
		data = page_address(page) + (i * blocksize);
		bitmap = bh[group - first_group]->b_data;

		/*
		 * We place the buddy block and bitmap block
		 * close together
		 */
		if ((first_block + i) & 1) {
			/* this is block of buddy */
			BUG_ON(incore == NULL);
			mb_debug(1, "put buddy for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);
			trace_ext4_mb_buddy_bitmap_load(sb, group);
			grinfo = ext4_get_group_info(sb, group);
			grinfo->bb_fragments = 0;
			memset(grinfo->bb_counters, 0,
			       sizeof(*grinfo->bb_counters) *
				(sb->s_blocksize_bits+2));
			/*
			 * incore got set to the group block bitmap below
			 */
			ext4_lock_group(sb, group);
			/* init the buddy */
			memset(data, 0xff, blocksize);
			ext4_mb_generate_buddy(sb, data, incore, group);
			ext4_unlock_group(sb, group);
			incore = NULL;
		} else {
			/* this is block of bitmap */
			BUG_ON(incore != NULL);
			mb_debug(1, "put bitmap for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);
			trace_ext4_mb_bitmap_load(sb, group);

			/* see comments in ext4_mb_put_pa() */
			ext4_lock_group(sb, group);
			memcpy(data, bitmap, blocksize);

			/* mark all preallocated blks used in in-core bitmap */
			ext4_mb_generate_from_pa(sb, data, group);
			ext4_mb_generate_from_freelist(sb, data, group);
			ext4_unlock_group(sb, group);

			/* set incore so that the buddy information can be
			 * generated using this
			 */
			incore = data;
		}
	}
	SetPageUptodate(page);

out:
	if (bh) {
		for (i = 0; i < groups_per_page; i++)
			brelse(bh[i]);
		if (bh != &bhs)
			kfree(bh);
	}
	return err;
}

/*
 * Lock the buddy and bitmap pages. This makes sure a parallel init_group
 * on the same buddy page doesn't happen while we hold the buddy page lock.
 * Return the locked buddy and bitmap pages in the e4b struct. If buddy and
 * bitmap are on the same page, e4b->bd_buddy_page is NULL and the return
 * value is 0.
 */
static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
		ext4_group_t group, struct ext4_buddy *e4b)
{
	struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
	int block, pnum, poff;
	int blocks_per_page;
	struct page *page;

	e4b->bd_buddy_page = NULL;
	e4b->bd_bitmap_page = NULL;

	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;
	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
	if (!page)
		return -EIO;
	BUG_ON(page->mapping != inode->i_mapping);
	e4b->bd_bitmap_page = page;
	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);

	if (blocks_per_page >= 2) {
		/* buddy and bitmap are on the same page */
		return 0;
	}

	block++;
	pnum = block / blocks_per_page;
	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
	if (!page)
		return -EIO;
	BUG_ON(page->mapping != inode->i_mapping);
	e4b->bd_buddy_page = page;
	return 0;
}

static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
{
	if (e4b->bd_bitmap_page) {
		unlock_page(e4b->bd_bitmap_page);
		page_cache_release(e4b->bd_bitmap_page);
	}
	if (e4b->bd_buddy_page) {
		unlock_page(e4b->bd_buddy_page);
		page_cache_release(e4b->bd_buddy_page);
	}
}

/*
 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
 * block group lock of all groups for this page; do not hold the BG lock when
 * calling this routine!
 */
static noinline_for_stack
int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
{

	struct ext4_group_info *this_grp;
	struct ext4_buddy e4b;
	struct page *page;
	int ret = 0;

	mb_debug(1, "init group %u\n", group);
	this_grp = ext4_get_group_info(sb, group);
	/*
	 * This ensures that we don't reinit the buddy cache
	 * page which maps to the group from which we are already
	 * allocating. If we are looking at the buddy cache we would
	 * have taken a reference using ext4_mb_load_buddy and that
	 * would have pinned the buddy page to the page cache.
	 */
	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b);
	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
		/*
		 * somebody initialized the group
		 * return without doing anything
		 */
		goto err;
	}

	page = e4b.bd_bitmap_page;
	ret = ext4_mb_init_cache(page, NULL);
	if (ret)
		goto err;
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	mark_page_accessed(page);

	if (e4b.bd_buddy_page == NULL) {
		/*
		 * If both the bitmap and buddy are in
		 * the same page we don't need to force
		 * init the buddy
		 */
		ret = 0;
		goto err;
	}
	/* init buddy cache */
	page = e4b.bd_buddy_page;
	ret = ext4_mb_init_cache(page, e4b.bd_bitmap);
	if (ret)
		goto err;
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	mark_page_accessed(page);
err:
	ext4_mb_put_buddy_page_lock(&e4b);
	return ret;
}

/*
 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
 * block group lock of all groups for this page; do not hold the BG lock when
 * calling this routine!
 */
static noinline_for_stack int
ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
					struct ext4_buddy *e4b)
{
	int blocks_per_page;
	int block;
	int pnum;
	int poff;
	struct page *page;
	int ret;
	struct ext4_group_info *grp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct inode *inode = sbi->s_buddy_cache;

	mb_debug(1, "load group %u\n", group);

	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
	grp = ext4_get_group_info(sb, group);

	e4b->bd_blkbits = sb->s_blocksize_bits;
	e4b->bd_info = grp;
	e4b->bd_sb = sb;
	e4b->bd_group = group;
	e4b->bd_buddy_page = NULL;
	e4b->bd_bitmap_page = NULL;

	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
		/*
		 * we need full data about the group
		 * to make a good selection
		 */
		ret = ext4_mb_init_group(sb, group);
		if (ret)
			return ret;
	}

	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	/* we could use find_or_create_page(), but it locks the page,
	 * which we'd like to avoid in the fast path ... */
	page = find_get_page(inode->i_mapping, pnum);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			/*
			 * drop the page reference and try
			 * to get the page with lock. If we
			 * are not uptodate that implies
			 * somebody just created the page but
			 * is yet to initialize the same. So
			 * wait for it to initialize.
			 */
			page_cache_release(page);
		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
		if (page) {
			BUG_ON(page->mapping != inode->i_mapping);
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, NULL);
				if (ret) {
					unlock_page(page);
					goto err;
				}
				mb_cmp_bitmaps(e4b, page_address(page) +
					       (poff * sb->s_blocksize));
			}
			unlock_page(page);
		}
	}
	if (page == NULL || !PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	e4b->bd_bitmap_page = page;
	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
	mark_page_accessed(page);

	block++;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	page = find_get_page(inode->i_mapping, pnum);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			page_cache_release(page);
		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
		if (page) {
			BUG_ON(page->mapping != inode->i_mapping);
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, e4b->bd_bitmap);
				if (ret) {
					unlock_page(page);
					goto err;
				}
			}
			unlock_page(page);
		}
	}
	if (page == NULL || !PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	e4b->bd_buddy_page = page;
	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
	mark_page_accessed(page);

	BUG_ON(e4b->bd_bitmap_page == NULL);
	BUG_ON(e4b->bd_buddy_page == NULL);

	return 0;

err:
	if (page)
		page_cache_release(page);
	if (e4b->bd_bitmap_page)
		page_cache_release(e4b->bd_bitmap_page);
	if (e4b->bd_buddy_page)
		page_cache_release(e4b->bd_buddy_page);
	e4b->bd_buddy = NULL;
	e4b->bd_bitmap = NULL;
	return ret;
}

static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
{
	if (e4b->bd_bitmap_page)
		page_cache_release(e4b->bd_bitmap_page);
	if (e4b->bd_buddy_page)
		page_cache_release(e4b->bd_buddy_page);
}


static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
{
	int order = 1;
	void *bb;

	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));

	bb = e4b->bd_buddy;
	while (order <= e4b->bd_blkbits + 1) {
		block = block >> 1;
		if (!mb_test_bit(block, bb)) {
			/* this block is part of buddy of order 'order' */
			return order;
		}
		bb += 1 << (e4b->bd_blkbits - order);
		order++;
	}
	return 0;
}

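/*
 * Editor's trace (hypothetical state): if blocks 12-15 form one free
 * order-2 chunk, then for block = 12 the order-1 buddy still has bit
 * 12>>1 = 6 set (only the top order of a free chunk has its bit
 * cleared), so the loop advances to order 2, finds bit 12>>2 = 3
 * clear, and returns 2.
 */
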
static void mb_clear_bits(void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: clear whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0;
			cur += 32;
			continue;
		}
		mb_clear_bit(cur, bm);
		cur++;
	}
}

void ext4_set_bits(void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: set whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0xffffffff;
			cur += 32;
			continue;
		}
		mb_set_bit(cur, bm);
		cur++;
	}
}

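/*
 * Editor's example: mb_clear_bits(bm, 16, 48) clears bits 16-63: bits
 * 16-31 go one at a time, then at cur = 32 the remaining 32 bits are
 * word-aligned and wide enough to be cleared with a single 32-bit
 * store; ext4_set_bits() mirrors this with 0xffffffff.
 */
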
static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
			  int first, int count)
{
	int block = 0;
	int max = 0;
	int order;
	void *buddy;
	void *buddy2;
	struct super_block *sb = e4b->bd_sb;

	BUG_ON(first + count > (sb->s_blocksize << 3));
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	mb_check_buddy(e4b);
	mb_free_blocks_double(inode, e4b, first, count);

	e4b->bd_info->bb_free += count;
	if (first < e4b->bd_info->bb_first_free)
		e4b->bd_info->bb_first_free = first;

	/* let's maintain fragments counter */
	if (first != 0)
		block = !mb_test_bit(first - 1, e4b->bd_bitmap);
	if (first + count < EXT4_SB(sb)->s_mb_maxs[0])
		max = !mb_test_bit(first + count, e4b->bd_bitmap);
	if (block && max)
		e4b->bd_info->bb_fragments--;
	else if (!block && !max)
		e4b->bd_info->bb_fragments++;

	/* let's maintain buddy itself */
	while (count-- > 0) {
		block = first++;
		order = 0;

		if (!mb_test_bit(block, e4b->bd_bitmap)) {
			ext4_fsblk_t blocknr;

			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
			blocknr += EXT4_C2B(EXT4_SB(sb), block);
			ext4_grp_locked_error(sb, e4b->bd_group,
					      inode ? inode->i_ino : 0,
					      blocknr,
					      "freeing already freed block "
					      "(bit %u)", block);
		}
		mb_clear_bit(block, e4b->bd_bitmap);
		e4b->bd_info->bb_counters[order]++;

		/* start of the buddy */
		buddy = mb_find_buddy(e4b, order, &max);

		do {
			block &= ~1UL;
			if (mb_test_bit(block, buddy) ||
					mb_test_bit(block + 1, buddy))
				break;

			/* both the buddies are free, try to coalesce them */
			buddy2 = mb_find_buddy(e4b, order + 1, &max);

			if (!buddy2)
				break;

			if (order > 0) {
				/* for special purposes, we don't set
				 * free bits in bitmap */
				mb_set_bit(block, buddy);
				mb_set_bit(block + 1, buddy);
			}
			e4b->bd_info->bb_counters[order]--;
			e4b->bd_info->bb_counters[order]--;

			block = block >> 1;
			order++;
			e4b->bd_info->bb_counters[order]++;

			mb_clear_bit(block, buddy2);
			buddy = buddy2;
		} while (1);
	}
	mb_set_largest_free_order(sb, e4b->bd_info);
	mb_check_buddy(e4b);
}

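/*
 * Editor's trace (hypothetical state): freeing block 5 while block 4
 * is already free.  Bit 5 is cleared in the bitmap and bb_counters[0]
 * is bumped; the loop then sees both order-0 buddies 4 and 5 free, so
 * it accounts them as one order-1 chunk (bb_counters[0] -= 2,
 * bb_counters[1] += 1, bit 2 cleared in the order-1 buddy) and stops
 * there if the neighbouring chunk 6-7 is not entirely free.
 */
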
static int mb_find_extent(struct ext4_buddy *e4b, int order, int block,
				int needed, struct ext4_free_extent *ex)
{
	int next = block;
	int max;
	void *buddy;

	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	BUG_ON(ex == NULL);

	buddy = mb_find_buddy(e4b, order, &max);
	BUG_ON(buddy == NULL);
	BUG_ON(block >= max);
	if (mb_test_bit(block, buddy)) {
		ex->fe_len = 0;
		ex->fe_start = 0;
		ex->fe_group = 0;
		return 0;
	}

	/* FIXME: drop order completely? */
	if (likely(order == 0)) {
		/* find actual order */
		order = mb_find_order_for_block(e4b, block);
		block = block >> order;
	}

	ex->fe_len = 1 << order;
	ex->fe_start = block << order;
	ex->fe_group = e4b->bd_group;

	/* calc difference from given start */
	next = next - ex->fe_start;
	ex->fe_len -= next;
	ex->fe_start += next;

	while (needed > ex->fe_len &&
	       (buddy = mb_find_buddy(e4b, order, &max))) {

		if (block + 1 >= max)
			break;

		next = (block + 1) * (1 << order);
		if (mb_test_bit(next, e4b->bd_bitmap))
			break;

		order = mb_find_order_for_block(e4b, next);

		block = next >> order;
		ex->fe_len += 1 << order;
	}

	BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3)));
	return ex->fe_len;
}

1397static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
1398{
1399 int ord;
1400 int mlen = 0;
1401 int max = 0;
1402 int cur;
1403 int start = ex->fe_start;
1404 int len = ex->fe_len;
1405 unsigned ret = 0;
1406 int len0 = len;
1407 void *buddy;
1408
1409 BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
1410 BUG_ON(e4b->bd_group != ex->fe_group);
Vincent Minetbc8e6742009-05-15 08:33:18 -04001411 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
Alex Tomasc9de5602008-01-29 00:19:52 -05001412 mb_check_buddy(e4b);
1413 mb_mark_used_double(e4b, start, len);
1414
1415 e4b->bd_info->bb_free -= len;
1416 if (e4b->bd_info->bb_first_free == start)
1417 e4b->bd_info->bb_first_free += len;
1418
1419 /* let's maintain fragments counter */
1420 if (start != 0)
Theodore Ts'oc5e8f3f2012-02-20 17:54:06 -05001421 mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
Alex Tomasc9de5602008-01-29 00:19:52 -05001422 if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
Theodore Ts'oc5e8f3f2012-02-20 17:54:06 -05001423 max = !mb_test_bit(start + len, e4b->bd_bitmap);
Alex Tomasc9de5602008-01-29 00:19:52 -05001424 if (mlen && max)
1425 e4b->bd_info->bb_fragments++;
1426 else if (!mlen && !max)
1427 e4b->bd_info->bb_fragments--;
1428
1429 /* let's maintain buddy itself */
1430 while (len) {
1431 ord = mb_find_order_for_block(e4b, start);
1432
1433 if (((start >> ord) << ord) == start && len >= (1 << ord)) {
1434 /* the whole chunk may be allocated at once! */
1435 mlen = 1 << ord;
1436 buddy = mb_find_buddy(e4b, ord, &max);
1437 BUG_ON((start >> ord) >= max);
1438 mb_set_bit(start >> ord, buddy);
1439 e4b->bd_info->bb_counters[ord]--;
1440 start += mlen;
1441 len -= mlen;
1442 BUG_ON(len < 0);
1443 continue;
1444 }
1445
1446 /* store for history */
1447 if (ret == 0)
1448 ret = len | (ord << 16);
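			/*
			 * Illustrative note: the low 16 bits of 'ret'
			 * record the remaining length at the first split,
			 * the high bits the buddy order it was split
			 * from; ext4_mb_use_best_found() unpacks them
			 * into ac_tail and ac_buddy.
			 */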
1449
1450 /* we have to split large buddy */
1451 BUG_ON(ord <= 0);
1452 buddy = mb_find_buddy(e4b, ord, &max);
1453 mb_set_bit(start >> ord, buddy);
1454 e4b->bd_info->bb_counters[ord]--;
1455
1456 ord--;
1457 cur = (start >> ord) & ~1U;
1458 buddy = mb_find_buddy(e4b, ord, &max);
1459 mb_clear_bit(cur, buddy);
1460 mb_clear_bit(cur + 1, buddy);
1461 e4b->bd_info->bb_counters[ord]++;
1462 e4b->bd_info->bb_counters[ord]++;
1463 }
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04001464 mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
Alex Tomasc9de5602008-01-29 00:19:52 -05001465
Theodore Ts'oc5e8f3f2012-02-20 17:54:06 -05001466 ext4_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
Alex Tomasc9de5602008-01-29 00:19:52 -05001467 mb_check_buddy(e4b);
1468
1469 return ret;
1470}
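/*
 * Illustrative sketch, not part of mballoc: one split step from the loop
 * in mb_mark_used() above, using the same hypothetical byte-per-entry
 * maps as the free sketch. Consuming a free chunk at 'order' exposes its
 * two halves at 'order - 1'; the caller keeps splitting the half that
 * contains the wanted start until the fit is exact. Names are made up.
 */
static inline void sketch_buddy_split_once(unsigned char used[][64],
					   int counters[], int order,
					   int block)
{
	/* take the free parent chunk out of its buddy map */
	used[order][block] = 1;
	counters[order]--;

	/* expose its two halves one order down (the mb_clear_bit()
	 * calls above) */
	used[order - 1][2 * block] = 0;
	used[order - 1][2 * block + 1] = 0;
	counters[order - 1] += 2;
}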
1471
1472/*
1473 * Must be called under group lock!
1474 */
1475static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
1476 struct ext4_buddy *e4b)
1477{
1478 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1479 int ret;
1480
1481 BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
1482 BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1483
1484 ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
1485 ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
1486 ret = mb_mark_used(e4b, &ac->ac_b_ex);
1487
1488 /* preallocation can change ac_b_ex, thus we store actually
1489 * allocated blocks for history */
1490 ac->ac_f_ex = ac->ac_b_ex;
1491
1492 ac->ac_status = AC_STATUS_FOUND;
1493 ac->ac_tail = ret & 0xffff;
1494 ac->ac_buddy = ret >> 16;
1495
Aneesh Kumar K.Vc3a326a2008-11-25 15:11:52 -05001496 /*
1497 * take the page reference. We want the page to be pinned
1498	 * so that we don't get an ext4_mb_init_cache() call for this
1499	 * group until we update the bitmap. That would mean we
1500	 * could double-allocate blocks. The reference is dropped
1501	 * in ext4_mb_release_context()
1502 */
Alex Tomasc9de5602008-01-29 00:19:52 -05001503 ac->ac_bitmap_page = e4b->bd_bitmap_page;
1504 get_page(ac->ac_bitmap_page);
1505 ac->ac_buddy_page = e4b->bd_buddy_page;
1506 get_page(ac->ac_buddy_page);
Alex Tomasc9de5602008-01-29 00:19:52 -05001507 /* store last allocated for subsequent stream allocation */
Theodore Ts'o4ba74d02009-08-09 22:01:13 -04001508 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
Alex Tomasc9de5602008-01-29 00:19:52 -05001509 spin_lock(&sbi->s_md_lock);
1510 sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
1511 sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
1512 spin_unlock(&sbi->s_md_lock);
1513 }
1514}
1515
1516/*
1517 * regular allocator, for general purposes allocation
1518 */
1519
1520static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
1521 struct ext4_buddy *e4b,
1522 int finish_group)
1523{
1524 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1525 struct ext4_free_extent *bex = &ac->ac_b_ex;
1526 struct ext4_free_extent *gex = &ac->ac_g_ex;
1527 struct ext4_free_extent ex;
1528 int max;
1529
Aneesh Kumar K.V032115f2009-01-05 21:34:30 -05001530 if (ac->ac_status == AC_STATUS_FOUND)
1531 return;
Alex Tomasc9de5602008-01-29 00:19:52 -05001532 /*
1533 * We don't want to scan for a whole year
1534 */
1535 if (ac->ac_found > sbi->s_mb_max_to_scan &&
1536 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1537 ac->ac_status = AC_STATUS_BREAK;
1538 return;
1539 }
1540
1541 /*
1542	 * Haven't found a good chunk so far, let's continue
1543 */
1544 if (bex->fe_len < gex->fe_len)
1545 return;
1546
1547 if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
1548 && bex->fe_group == e4b->bd_group) {
1549 /* recheck chunk's availability - we don't know
1550 * when it was found (within this lock-unlock
1551 * period or not) */
1552 max = mb_find_extent(e4b, 0, bex->fe_start, gex->fe_len, &ex);
1553 if (max >= gex->fe_len) {
1554 ext4_mb_use_best_found(ac, e4b);
1555 return;
1556 }
1557 }
1558}
1559
1560/*
1561 * The routine checks whether the found extent is good enough. If it is,
1562 * the extent gets marked used and a flag is set in the context
1563 * to stop scanning. Otherwise, the extent is compared with the
1564 * previously found extent and, if the new one is better, it is stored
1565 * in the context. Later, the best found extent will be used if
1566 * mballoc can't find a good enough extent.
1567 *
1568 * FIXME: real allocation policy is to be designed yet!
1569 */
1570static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
1571 struct ext4_free_extent *ex,
1572 struct ext4_buddy *e4b)
1573{
1574 struct ext4_free_extent *bex = &ac->ac_b_ex;
1575 struct ext4_free_extent *gex = &ac->ac_g_ex;
1576
1577 BUG_ON(ex->fe_len <= 0);
Theodore Ts'o7137d7a2011-09-09 18:38:51 -04001578 BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
1579 BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
Alex Tomasc9de5602008-01-29 00:19:52 -05001580 BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
1581
1582 ac->ac_found++;
1583
1584 /*
1585 * The special case - take what you catch first
1586 */
1587 if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1588 *bex = *ex;
1589 ext4_mb_use_best_found(ac, e4b);
1590 return;
1591 }
1592
1593 /*
1594	 * Let's check whether the chunk is good enough
1595 */
1596 if (ex->fe_len == gex->fe_len) {
1597 *bex = *ex;
1598 ext4_mb_use_best_found(ac, e4b);
1599 return;
1600 }
1601
1602 /*
1603	 * If this is the first found extent, just store it in the context
1604 */
1605 if (bex->fe_len == 0) {
1606 *bex = *ex;
1607 return;
1608 }
1609
1610 /*
1611	 * If the newly found extent is better, store it in the context
1612 */
1613 if (bex->fe_len < gex->fe_len) {
1614 /* if the request isn't satisfied, any found extent
1615		 * larger than the previous best one is better */
1616 if (ex->fe_len > bex->fe_len)
1617 *bex = *ex;
1618 } else if (ex->fe_len > gex->fe_len) {
1619 /* if the request is satisfied, then we try to find
1620		 * an extent that still satisfies the request, but is
1621		 * smaller than the previous one */
1622 if (ex->fe_len < bex->fe_len)
1623 *bex = *ex;
1624 }
1625
1626 ext4_mb_check_limits(ac, e4b, 0);
1627}
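/*
 * Illustrative sketch, not part of mballoc: the selection policy above
 * folded into one predicate. Returns non-zero when a candidate extent of
 * 'ex_len' should replace the current best of 'bex_len' for a goal of
 * 'goal_len' (lengths in clusters). Names are made up; the EXT4_MB_HINT_FIRST
 * shortcut above is left out.
 */
static inline int sketch_candidate_is_better(int ex_len, int bex_len,
					     int goal_len)
{
	if (ex_len == goal_len)
		return 1;	/* exact fit: used immediately above */
	if (bex_len == 0)
		return 1;	/* first candidate is always stored */
	if (bex_len < goal_len)
		return ex_len > bex_len;	/* goal unmet: bigger wins */
	/* goal already met: the smallest extent still covering it wins */
	return ex_len > goal_len && ex_len < bex_len;
}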
1628
Eric Sandeen089ceec2009-07-05 22:17:31 -04001629static noinline_for_stack
1630int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
Alex Tomasc9de5602008-01-29 00:19:52 -05001631 struct ext4_buddy *e4b)
1632{
1633 struct ext4_free_extent ex = ac->ac_b_ex;
1634 ext4_group_t group = ex.fe_group;
1635 int max;
1636 int err;
1637
1638 BUG_ON(ex.fe_len <= 0);
1639 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1640 if (err)
1641 return err;
1642
1643 ext4_lock_group(ac->ac_sb, group);
1644 max = mb_find_extent(e4b, 0, ex.fe_start, ex.fe_len, &ex);
1645
1646 if (max > 0) {
1647 ac->ac_b_ex = ex;
1648 ext4_mb_use_best_found(ac, e4b);
1649 }
1650
1651 ext4_unlock_group(ac->ac_sb, group);
Jing Zhange39e07f2010-05-14 00:00:00 -04001652 ext4_mb_unload_buddy(e4b);
Alex Tomasc9de5602008-01-29 00:19:52 -05001653
1654 return 0;
1655}
1656
Eric Sandeen089ceec2009-07-05 22:17:31 -04001657static noinline_for_stack
1658int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
Alex Tomasc9de5602008-01-29 00:19:52 -05001659 struct ext4_buddy *e4b)
1660{
1661 ext4_group_t group = ac->ac_g_ex.fe_group;
1662 int max;
1663 int err;
1664 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
Alex Tomasc9de5602008-01-29 00:19:52 -05001665 struct ext4_free_extent ex;
1666
1667 if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
1668 return 0;
1669
1670 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1671 if (err)
1672 return err;
1673
1674 ext4_lock_group(ac->ac_sb, group);
1675 max = mb_find_extent(e4b, 0, ac->ac_g_ex.fe_start,
1676 ac->ac_g_ex.fe_len, &ex);
1677
1678 if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
1679 ext4_fsblk_t start;
1680
Akinobu Mita5661bd62010-03-03 23:53:39 -05001681 start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
1682 ex.fe_start;
Alex Tomasc9de5602008-01-29 00:19:52 -05001683 /* use do_div to get remainder (would be 64-bit modulo) */
1684 if (do_div(start, sbi->s_stripe) == 0) {
1685 ac->ac_found++;
1686 ac->ac_b_ex = ex;
1687 ext4_mb_use_best_found(ac, e4b);
1688 }
1689 } else if (max >= ac->ac_g_ex.fe_len) {
1690 BUG_ON(ex.fe_len <= 0);
1691 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1692 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1693 ac->ac_found++;
1694 ac->ac_b_ex = ex;
1695 ext4_mb_use_best_found(ac, e4b);
1696 } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
1697		/* Sometimes, the caller may want to merge even a small
1698		 * number of blocks into an existing extent */
1699 BUG_ON(ex.fe_len <= 0);
1700 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1701 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1702 ac->ac_found++;
1703 ac->ac_b_ex = ex;
1704 ext4_mb_use_best_found(ac, e4b);
1705 }
1706 ext4_unlock_group(ac->ac_sb, group);
Jing Zhange39e07f2010-05-14 00:00:00 -04001707 ext4_mb_unload_buddy(e4b);
Alex Tomasc9de5602008-01-29 00:19:52 -05001708
1709 return 0;
1710}
1711
1712/*
1713 * The routine scans buddy structures (not the bitmap!) from the given order
1714 * up to the max order, trying to find a big enough chunk to satisfy the request
1715 */
Eric Sandeen089ceec2009-07-05 22:17:31 -04001716static noinline_for_stack
1717void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
Alex Tomasc9de5602008-01-29 00:19:52 -05001718 struct ext4_buddy *e4b)
1719{
1720 struct super_block *sb = ac->ac_sb;
1721 struct ext4_group_info *grp = e4b->bd_info;
1722 void *buddy;
1723 int i;
1724 int k;
1725 int max;
1726
1727 BUG_ON(ac->ac_2order <= 0);
1728 for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
1729 if (grp->bb_counters[i] == 0)
1730 continue;
1731
1732 buddy = mb_find_buddy(e4b, i, &max);
1733 BUG_ON(buddy == NULL);
1734
Aneesh Kumar K.Vffad0a42008-02-23 01:38:34 -05001735 k = mb_find_next_zero_bit(buddy, max, 0);
Alex Tomasc9de5602008-01-29 00:19:52 -05001736 BUG_ON(k >= max);
1737
1738 ac->ac_found++;
1739
1740 ac->ac_b_ex.fe_len = 1 << i;
1741 ac->ac_b_ex.fe_start = k << i;
1742 ac->ac_b_ex.fe_group = e4b->bd_group;
1743
1744 ext4_mb_use_best_found(ac, e4b);
1745
1746 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
1747
1748 if (EXT4_SB(sb)->s_mb_stats)
1749 atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
1750
1751 break;
1752 }
1753}
1754
1755/*
1756 * The routine scans the group and measures all found extents.
1757 * To optimize scanning, the caller must pass the number of
1758 * free blocks in the group, so the routine knows the upper limit.
1759 */
Eric Sandeen089ceec2009-07-05 22:17:31 -04001760static noinline_for_stack
1761void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
Alex Tomasc9de5602008-01-29 00:19:52 -05001762 struct ext4_buddy *e4b)
1763{
1764 struct super_block *sb = ac->ac_sb;
Theodore Ts'oc5e8f3f2012-02-20 17:54:06 -05001765 void *bitmap = e4b->bd_bitmap;
Alex Tomasc9de5602008-01-29 00:19:52 -05001766 struct ext4_free_extent ex;
1767 int i;
1768 int free;
1769
1770 free = e4b->bd_info->bb_free;
1771 BUG_ON(free <= 0);
1772
1773 i = e4b->bd_info->bb_first_free;
1774
1775 while (free && ac->ac_status == AC_STATUS_CONTINUE) {
Aneesh Kumar K.Vffad0a42008-02-23 01:38:34 -05001776 i = mb_find_next_zero_bit(bitmap,
Theodore Ts'o7137d7a2011-09-09 18:38:51 -04001777 EXT4_CLUSTERS_PER_GROUP(sb), i);
1778 if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
Aneesh Kumar K.V26346ff2008-02-10 01:10:04 -05001779 /*
Aneesh Kumar K.Ve56eb652008-02-15 13:48:21 -05001780			 * If we have a corrupt bitmap, we won't find any
Aneesh Kumar K.V26346ff2008-02-10 01:10:04 -05001781			 * free blocks even though the group info says we
1782			 * have free blocks
1783 */
Theodore Ts'oe29136f2010-06-29 12:54:28 -04001784 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
Theodore Ts'o53accfa2011-09-09 18:48:51 -04001785 "%d free clusters as per "
Theodore Ts'ofde4d952009-01-05 22:17:35 -05001786 "group info. But bitmap says 0",
Aneesh Kumar K.V26346ff2008-02-10 01:10:04 -05001787 free);
Alex Tomasc9de5602008-01-29 00:19:52 -05001788 break;
1789 }
1790
1791 mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex);
1792 BUG_ON(ex.fe_len <= 0);
Aneesh Kumar K.V26346ff2008-02-10 01:10:04 -05001793 if (free < ex.fe_len) {
Theodore Ts'oe29136f2010-06-29 12:54:28 -04001794 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
Theodore Ts'o53accfa2011-09-09 18:48:51 -04001795 "%d free clusters as per "
Theodore Ts'ofde4d952009-01-05 22:17:35 -05001796 "group info. But got %d blocks",
Aneesh Kumar K.V26346ff2008-02-10 01:10:04 -05001797 free, ex.fe_len);
Aneesh Kumar K.Ve56eb652008-02-15 13:48:21 -05001798 /*
1799			 * The number of free blocks differs. This mostly
1800			 * indicates that the bitmap is corrupt. So exit
1801 * without claiming the space.
1802 */
1803 break;
Aneesh Kumar K.V26346ff2008-02-10 01:10:04 -05001804 }
Alex Tomasc9de5602008-01-29 00:19:52 -05001805
1806 ext4_mb_measure_extent(ac, &ex, e4b);
1807
1808 i += ex.fe_len;
1809 free -= ex.fe_len;
1810 }
1811
1812 ext4_mb_check_limits(ac, e4b, 1);
1813}
1814
1815/*
1816 * This is a special case for storage devices like RAID5;
Eric Sandeen506bf2d2010-07-27 11:56:06 -04001817 * we try to find stripe-aligned chunks for stripe-size-multiple requests
Alex Tomasc9de5602008-01-29 00:19:52 -05001818 */
Eric Sandeen089ceec2009-07-05 22:17:31 -04001819static noinline_for_stack
1820void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
Alex Tomasc9de5602008-01-29 00:19:52 -05001821 struct ext4_buddy *e4b)
1822{
1823 struct super_block *sb = ac->ac_sb;
1824 struct ext4_sb_info *sbi = EXT4_SB(sb);
Theodore Ts'oc5e8f3f2012-02-20 17:54:06 -05001825 void *bitmap = e4b->bd_bitmap;
Alex Tomasc9de5602008-01-29 00:19:52 -05001826 struct ext4_free_extent ex;
1827 ext4_fsblk_t first_group_block;
1828 ext4_fsblk_t a;
1829 ext4_grpblk_t i;
1830 int max;
1831
1832 BUG_ON(sbi->s_stripe == 0);
1833
1834 /* find first stripe-aligned block in group */
Akinobu Mita5661bd62010-03-03 23:53:39 -05001835 first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
1836
Alex Tomasc9de5602008-01-29 00:19:52 -05001837 a = first_group_block + sbi->s_stripe - 1;
1838 do_div(a, sbi->s_stripe);
1839 i = (a * sbi->s_stripe) - first_group_block;
1840
Theodore Ts'o7137d7a2011-09-09 18:38:51 -04001841 while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
Alex Tomasc9de5602008-01-29 00:19:52 -05001842 if (!mb_test_bit(i, bitmap)) {
1843 max = mb_find_extent(e4b, 0, i, sbi->s_stripe, &ex);
1844 if (max >= sbi->s_stripe) {
1845 ac->ac_found++;
1846 ac->ac_b_ex = ex;
1847 ext4_mb_use_best_found(ac, e4b);
1848 break;
1849 }
1850 }
1851 i += sbi->s_stripe;
1852 }
1853}
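/*
 * Illustrative sketch, not part of mballoc: the round-up performed by the
 * do_div() sequence above. Given the group's first block number and the
 * stripe width, it yields the group-relative offset of the first
 * stripe-aligned block; a plain 64-bit division stands in for do_div().
 * For example, a group starting at block 100 with a stripe of 24 gives
 * (100 + 23) / 24 == 5, 5 * 24 - 100 == 20, i.e. block 120.
 */
static inline unsigned long long
sketch_first_aligned_offset(unsigned long long first_group_block,
			    unsigned long stripe)
{
	/* round first_group_block up to a multiple of stripe ... */
	unsigned long long a = (first_group_block + stripe - 1) / stripe;

	/* ... and convert back to a group-relative offset ('i' above) */
	return a * stripe - first_group_block;
}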
1854
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04001855/* This is now called BEFORE we load the buddy bitmap. */
Alex Tomasc9de5602008-01-29 00:19:52 -05001856static int ext4_mb_good_group(struct ext4_allocation_context *ac,
1857 ext4_group_t group, int cr)
1858{
1859 unsigned free, fragments;
Theodore Ts'oa4912122009-03-12 12:18:34 -04001860 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
Alex Tomasc9de5602008-01-29 00:19:52 -05001861 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
1862
1863 BUG_ON(cr < 0 || cr >= 4);
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04001864
Theodore Ts'o01fc48e2012-08-17 09:46:17 -04001865 free = grp->bb_free;
1866 if (free == 0)
1867 return 0;
1868 if (cr <= 2 && free < ac->ac_g_ex.fe_len)
1869 return 0;
1870
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04001871 /* We only do this if the grp has never been initialized */
1872 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1873 int ret = ext4_mb_init_group(ac->ac_sb, group);
1874 if (ret)
1875 return 0;
1876 }
Alex Tomasc9de5602008-01-29 00:19:52 -05001877
Alex Tomasc9de5602008-01-29 00:19:52 -05001878 fragments = grp->bb_fragments;
Alex Tomasc9de5602008-01-29 00:19:52 -05001879 if (fragments == 0)
1880 return 0;
1881
1882 switch (cr) {
1883 case 0:
1884 BUG_ON(ac->ac_2order == 0);
Alex Tomasc9de5602008-01-29 00:19:52 -05001885
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04001886 if (grp->bb_largest_free_order < ac->ac_2order)
1887 return 0;
1888
Theodore Ts'oa4912122009-03-12 12:18:34 -04001889 /* Avoid using the first bg of a flexgroup for data files */
1890 if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
1891 (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
1892 ((group % flex_size) == 0))
1893 return 0;
1894
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04001895 return 1;
Alex Tomasc9de5602008-01-29 00:19:52 -05001896 case 1:
1897 if ((free / fragments) >= ac->ac_g_ex.fe_len)
1898 return 1;
1899 break;
1900 case 2:
1901 if (free >= ac->ac_g_ex.fe_len)
1902 return 1;
1903 break;
1904 case 3:
1905 return 1;
1906 default:
1907 BUG();
1908 }
1909
1910 return 0;
1911}
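/*
 * Illustrative summary of the checks above: cr == 0 accepts a group only
 * if a single free buddy chunk (bb_largest_free_order) can cover the
 * request; cr == 1 wants the average fragment size (free / fragments) to
 * cover it; cr == 2 merely requires enough free clusters in total; and
 * cr == 3 takes any group with free space.
 */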
1912
Eric Sandeen4ddfef72008-04-29 08:11:12 -04001913static noinline_for_stack int
1914ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
Alex Tomasc9de5602008-01-29 00:19:52 -05001915{
Theodore Ts'o8df96752009-05-01 08:50:38 -04001916 ext4_group_t ngroups, group, i;
Alex Tomasc9de5602008-01-29 00:19:52 -05001917 int cr;
1918 int err = 0;
Alex Tomasc9de5602008-01-29 00:19:52 -05001919 struct ext4_sb_info *sbi;
1920 struct super_block *sb;
1921 struct ext4_buddy e4b;
Alex Tomasc9de5602008-01-29 00:19:52 -05001922
1923 sb = ac->ac_sb;
1924 sbi = EXT4_SB(sb);
Theodore Ts'o8df96752009-05-01 08:50:38 -04001925 ngroups = ext4_get_groups_count(sb);
Eric Sandeenfb0a3872009-09-16 14:45:10 -04001926 /* non-extent files are limited to low blocks/groups */
Dmitry Monakhov12e9b892010-05-16 22:00:00 -04001927 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
Eric Sandeenfb0a3872009-09-16 14:45:10 -04001928 ngroups = sbi->s_blockfile_groups;
1929
Alex Tomasc9de5602008-01-29 00:19:52 -05001930 BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1931
1932 /* first, try the goal */
1933 err = ext4_mb_find_by_goal(ac, &e4b);
1934 if (err || ac->ac_status == AC_STATUS_FOUND)
1935 goto out;
1936
1937 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
1938 goto out;
1939
1940 /*
1941	 * ac->ac_2order is set only if fe_len is a power of 2;
1942	 * if ac_2order is set we also set the criteria to 0 so that we
1943	 * try an exact allocation using the buddy.
1944 */
1945 i = fls(ac->ac_g_ex.fe_len);
1946 ac->ac_2order = 0;
1947 /*
1948 * We search using buddy data only if the order of the request
1949	 * is greater than or equal to sbi->s_mb_order2_reqs.
Theodore Ts'ob713a5e2009-03-31 09:11:14 -04001950 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
Alex Tomasc9de5602008-01-29 00:19:52 -05001951 */
1952 if (i >= sbi->s_mb_order2_reqs) {
1953 /*
1954		 * This should tell if fe_len is exactly a power of 2
1955 */
1956 if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
1957 ac->ac_2order = i - 1;
1958 }
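	/*
	 * Worked example (illustrative): for fe_len == 8, i = fls(8) == 4
	 * and 8 & ~(1 << 3) == 0, so ac_2order becomes 3 (an 8-block,
	 * order-3 request). For fe_len == 6, 6 & ~(1 << 2) != 0 and
	 * ac_2order stays 0, so the exact cr == 0 scan is skipped.
	 */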
1959
Theodore Ts'o4ba74d02009-08-09 22:01:13 -04001960	/* if stream allocation is enabled, use the global goal */
1961 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
Alex Tomasc9de5602008-01-29 00:19:52 -05001962		/* TBD: may be a hot spot */
1963 spin_lock(&sbi->s_md_lock);
1964 ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
1965 ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
1966 spin_unlock(&sbi->s_md_lock);
1967 }
Theodore Ts'o4ba74d02009-08-09 22:01:13 -04001968
Alex Tomasc9de5602008-01-29 00:19:52 -05001969	/* Let's just scan groups to find more or less suitable blocks */
1970 cr = ac->ac_2order ? 0 : 1;
1971 /*
1972 * cr == 0 try to get exact allocation,
1973 * cr == 3 try to get anything
1974 */
1975repeat:
1976 for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
1977 ac->ac_criteria = cr;
Aneesh Kumar K.Ved8f9c72008-07-11 19:27:31 -04001978 /*
1979		 * search for the right group, starting
1980		 * from the goal value specified
1981 */
1982 group = ac->ac_g_ex.fe_group;
1983
Theodore Ts'o8df96752009-05-01 08:50:38 -04001984 for (i = 0; i < ngroups; group++, i++) {
Theodore Ts'o8df96752009-05-01 08:50:38 -04001985 if (group == ngroups)
Alex Tomasc9de5602008-01-29 00:19:52 -05001986 group = 0;
1987
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04001988 /* This now checks without needing the buddy page */
1989 if (!ext4_mb_good_group(ac, group, cr))
Alex Tomasc9de5602008-01-29 00:19:52 -05001990 continue;
1991
Alex Tomasc9de5602008-01-29 00:19:52 -05001992 err = ext4_mb_load_buddy(sb, group, &e4b);
1993 if (err)
1994 goto out;
1995
1996 ext4_lock_group(sb, group);
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04001997
1998 /*
1999 * We need to check again after locking the
2000 * block group
2001 */
Alex Tomasc9de5602008-01-29 00:19:52 -05002002 if (!ext4_mb_good_group(ac, group, cr)) {
Alex Tomasc9de5602008-01-29 00:19:52 -05002003 ext4_unlock_group(sb, group);
Jing Zhange39e07f2010-05-14 00:00:00 -04002004 ext4_mb_unload_buddy(&e4b);
Alex Tomasc9de5602008-01-29 00:19:52 -05002005 continue;
2006 }
2007
2008 ac->ac_groups_scanned++;
Theodore Ts'o75507ef2009-05-01 12:58:36 -04002009 if (cr == 0)
Alex Tomasc9de5602008-01-29 00:19:52 -05002010 ext4_mb_simple_scan_group(ac, &e4b);
Eric Sandeen506bf2d2010-07-27 11:56:06 -04002011 else if (cr == 1 && sbi->s_stripe &&
2012 !(ac->ac_g_ex.fe_len % sbi->s_stripe))
Alex Tomasc9de5602008-01-29 00:19:52 -05002013 ext4_mb_scan_aligned(ac, &e4b);
2014 else
2015 ext4_mb_complex_scan_group(ac, &e4b);
2016
2017 ext4_unlock_group(sb, group);
Jing Zhange39e07f2010-05-14 00:00:00 -04002018 ext4_mb_unload_buddy(&e4b);
Alex Tomasc9de5602008-01-29 00:19:52 -05002019
2020 if (ac->ac_status != AC_STATUS_CONTINUE)
2021 break;
2022 }
2023 }
2024
2025 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2026 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2027 /*
2028 * We've been searching too long. Let's try to allocate
2029 * the best chunk we've found so far
2030 */
2031
2032 ext4_mb_try_best_found(ac, &e4b);
2033 if (ac->ac_status != AC_STATUS_FOUND) {
2034 /*
2035			 * Someone luckier has already allocated it.
2036			 * The only thing we can do is just take the first
2037			 * found block(s)
2038 printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n");
2039 */
2040 ac->ac_b_ex.fe_group = 0;
2041 ac->ac_b_ex.fe_start = 0;
2042 ac->ac_b_ex.fe_len = 0;
2043 ac->ac_status = AC_STATUS_CONTINUE;
2044 ac->ac_flags |= EXT4_MB_HINT_FIRST;
2045 cr = 3;
2046 atomic_inc(&sbi->s_mb_lost_chunks);
2047 goto repeat;
2048 }
2049 }
2050out:
2051 return err;
2052}
2053
Alex Tomasc9de5602008-01-29 00:19:52 -05002054static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2055{
2056 struct super_block *sb = seq->private;
Alex Tomasc9de5602008-01-29 00:19:52 -05002057 ext4_group_t group;
2058
Theodore Ts'o8df96752009-05-01 08:50:38 -04002059 if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
Alex Tomasc9de5602008-01-29 00:19:52 -05002060 return NULL;
Alex Tomasc9de5602008-01-29 00:19:52 -05002061 group = *pos + 1;
Theodore Ts'oa9df9a42009-01-05 22:18:16 -05002062 return (void *) ((unsigned long) group);
Alex Tomasc9de5602008-01-29 00:19:52 -05002063}
2064
2065static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2066{
2067 struct super_block *sb = seq->private;
Alex Tomasc9de5602008-01-29 00:19:52 -05002068 ext4_group_t group;
2069
2070 ++*pos;
Theodore Ts'o8df96752009-05-01 08:50:38 -04002071 if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
Alex Tomasc9de5602008-01-29 00:19:52 -05002072 return NULL;
2073 group = *pos + 1;
Theodore Ts'oa9df9a42009-01-05 22:18:16 -05002074 return (void *) ((unsigned long) group);
Alex Tomasc9de5602008-01-29 00:19:52 -05002075}
2076
2077static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2078{
2079 struct super_block *sb = seq->private;
Theodore Ts'oa9df9a42009-01-05 22:18:16 -05002080 ext4_group_t group = (ext4_group_t) ((unsigned long) v);
Alex Tomasc9de5602008-01-29 00:19:52 -05002081 int i;
Aditya Kali1c8457c2012-06-30 19:10:57 -04002082 int err, buddy_loaded = 0;
Alex Tomasc9de5602008-01-29 00:19:52 -05002083 struct ext4_buddy e4b;
Aditya Kali1c8457c2012-06-30 19:10:57 -04002084 struct ext4_group_info *grinfo;
Alex Tomasc9de5602008-01-29 00:19:52 -05002085 struct sg {
2086 struct ext4_group_info info;
Eric Sandeena36b4492009-08-25 22:36:45 -04002087 ext4_grpblk_t counters[16];
Alex Tomasc9de5602008-01-29 00:19:52 -05002088 } sg;
2089
2090 group--;
2091 if (group == 0)
2092 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
2093 "[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s "
2094 "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
2095 "group", "free", "frags", "first",
2096 "2^0", "2^1", "2^2", "2^3", "2^4", "2^5", "2^6",
2097 "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13");
2098
2099 i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2100 sizeof(struct ext4_group_info);
Aditya Kali1c8457c2012-06-30 19:10:57 -04002101 grinfo = ext4_get_group_info(sb, group);
2102 /* Load the group info in memory only if not already loaded. */
2103 if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
2104 err = ext4_mb_load_buddy(sb, group, &e4b);
2105 if (err) {
2106 seq_printf(seq, "#%-5u: I/O error\n", group);
2107 return 0;
2108 }
2109 buddy_loaded = 1;
Alex Tomasc9de5602008-01-29 00:19:52 -05002110 }
Aditya Kali1c8457c2012-06-30 19:10:57 -04002111
Alex Tomasc9de5602008-01-29 00:19:52 -05002112 memcpy(&sg, ext4_get_group_info(sb, group), i);
Aditya Kali1c8457c2012-06-30 19:10:57 -04002113
2114 if (buddy_loaded)
2115 ext4_mb_unload_buddy(&e4b);
Alex Tomasc9de5602008-01-29 00:19:52 -05002116
Theodore Ts'oa9df9a42009-01-05 22:18:16 -05002117 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
Alex Tomasc9de5602008-01-29 00:19:52 -05002118 sg.info.bb_fragments, sg.info.bb_first_free);
2119 for (i = 0; i <= 13; i++)
2120 seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
2121 sg.info.bb_counters[i] : 0);
2122 seq_printf(seq, " ]\n");
2123
2124 return 0;
2125}
2126
2127static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2128{
2129}
2130
Tobias Klauser7f1346a2009-09-05 09:28:54 -04002131static const struct seq_operations ext4_mb_seq_groups_ops = {
Alex Tomasc9de5602008-01-29 00:19:52 -05002132 .start = ext4_mb_seq_groups_start,
2133 .next = ext4_mb_seq_groups_next,
2134 .stop = ext4_mb_seq_groups_stop,
2135 .show = ext4_mb_seq_groups_show,
2136};
2137
2138static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
2139{
2140 struct super_block *sb = PDE(inode)->data;
2141 int rc;
2142
2143 rc = seq_open(file, &ext4_mb_seq_groups_ops);
2144 if (rc == 0) {
Joe Perchesa271fe82010-07-27 11:56:04 -04002145 struct seq_file *m = file->private_data;
Alex Tomasc9de5602008-01-29 00:19:52 -05002146 m->private = sb;
2147 }
2148 return rc;
2149
2150}
2151
Tobias Klauser7f1346a2009-09-05 09:28:54 -04002152static const struct file_operations ext4_mb_seq_groups_fops = {
Alex Tomasc9de5602008-01-29 00:19:52 -05002153 .owner = THIS_MODULE,
2154 .open = ext4_mb_seq_groups_open,
2155 .read = seq_read,
2156 .llseek = seq_lseek,
2157 .release = seq_release,
2158};
2159
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002160static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
2161{
2162 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2163 struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
2164
2165 BUG_ON(!cachep);
2166 return cachep;
2167}
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002168
2169/* Create and initialize ext4_group_info data for the given group. */
Aneesh Kumar K.V920313a2009-01-05 21:36:19 -05002170int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002171 struct ext4_group_desc *desc)
2172{
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002173 int i;
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002174 int metalen = 0;
2175 struct ext4_sb_info *sbi = EXT4_SB(sb);
2176 struct ext4_group_info **meta_group_info;
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002177 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002178
2179 /*
2180	 * First check if this group is the first one covered by a new
2181	 * block group descriptor block. If so, we have to allocate a new
2182	 * table of pointers to ext4_group_info structures
2183 */
2184 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2185 metalen = sizeof(*meta_group_info) <<
2186 EXT4_DESC_PER_BLOCK_BITS(sb);
2187 meta_group_info = kmalloc(metalen, GFP_KERNEL);
2188 if (meta_group_info == NULL) {
Joe Perches7f6a11e2012-03-19 23:09:43 -04002189 ext4_msg(sb, KERN_ERR, "can't allocate mem "
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04002190 "for a buddy group");
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002191 goto exit_meta_group_info;
2192 }
2193 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
2194 meta_group_info;
2195 }
2196
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002197 meta_group_info =
2198 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
2199 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
2200
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002201 meta_group_info[i] = kmem_cache_alloc(cachep, GFP_KERNEL);
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002202 if (meta_group_info[i] == NULL) {
Joe Perches7f6a11e2012-03-19 23:09:43 -04002203 ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002204 goto exit_group_info;
2205 }
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002206 memset(meta_group_info[i], 0, kmem_cache_size(cachep));
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002207 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
2208 &(meta_group_info[i]->bb_state));
2209
2210 /*
2211 * initialize bb_free to be able to skip
2212 * empty groups without initialization
2213 */
2214 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2215 meta_group_info[i]->bb_free =
Theodore Ts'ocff1dfd2011-09-09 19:12:51 -04002216 ext4_free_clusters_after_init(sb, group, desc);
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002217 } else {
2218 meta_group_info[i]->bb_free =
Theodore Ts'o021b65b2011-09-09 19:08:51 -04002219 ext4_free_group_clusters(sb, desc);
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002220 }
2221
2222 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
Aneesh Kumar K.V920313a2009-01-05 21:36:19 -05002223 init_rwsem(&meta_group_info[i]->alloc_sem);
Venkatesh Pallipadi64e290e2010-03-04 22:25:21 -05002224 meta_group_info[i]->bb_free_root = RB_ROOT;
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04002225 meta_group_info[i]->bb_largest_free_order = -1; /* uninit */
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002226
2227#ifdef DOUBLE_CHECK
2228 {
2229 struct buffer_head *bh;
2230 meta_group_info[i]->bb_bitmap =
2231 kmalloc(sb->s_blocksize, GFP_KERNEL);
2232 BUG_ON(meta_group_info[i]->bb_bitmap == NULL);
2233 bh = ext4_read_block_bitmap(sb, group);
2234 BUG_ON(bh == NULL);
2235 memcpy(meta_group_info[i]->bb_bitmap, bh->b_data,
2236 sb->s_blocksize);
2237 put_bh(bh);
2238 }
2239#endif
2240
2241 return 0;
2242
2243exit_group_info:
2244 /* If a meta_group_info table has been allocated, release it now */
Tao Macaaf7a22011-07-11 18:42:42 -04002245 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002246 kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
Tao Macaaf7a22011-07-11 18:42:42 -04002247 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
2248 }
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002249exit_meta_group_info:
2250 return -ENOMEM;
2251} /* ext4_mb_add_groupinfo */
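/*
 * Illustrative sketch, not part of mballoc: the two-level s_group_info
 * indexing used above. Tables of EXT4_DESC_PER_BLOCK(sb) pointers are
 * allocated one per descriptor block; the high bits of the group number
 * select the table and the low bits select the slot within it, as
 * ext4_get_group_info() does. Parameter names are made up.
 */
static inline struct ext4_group_info *
sketch_group_info_lookup(struct ext4_group_info ***group_info,
			 unsigned long group,
			 unsigned int desc_per_block_bits)
{
	unsigned long table = group >> desc_per_block_bits;
	unsigned long slot = group & ((1UL << desc_per_block_bits) - 1);

	return group_info[table][slot];
}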
2252
Alex Tomasc9de5602008-01-29 00:19:52 -05002253static int ext4_mb_init_backend(struct super_block *sb)
2254{
Theodore Ts'o8df96752009-05-01 08:50:38 -04002255 ext4_group_t ngroups = ext4_get_groups_count(sb);
Alex Tomasc9de5602008-01-29 00:19:52 -05002256 ext4_group_t i;
Alex Tomasc9de5602008-01-29 00:19:52 -05002257 struct ext4_sb_info *sbi = EXT4_SB(sb);
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002258 struct ext4_super_block *es = sbi->s_es;
2259 int num_meta_group_infos;
2260 int num_meta_group_infos_max;
2261 int array_size;
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002262 struct ext4_group_desc *desc;
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002263 struct kmem_cache *cachep;
Alex Tomasc9de5602008-01-29 00:19:52 -05002264
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002265 /* This is the number of blocks used by GDT */
Theodore Ts'o8df96752009-05-01 08:50:38 -04002266 num_meta_group_infos = (ngroups + EXT4_DESC_PER_BLOCK(sb) -
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002267 1) >> EXT4_DESC_PER_BLOCK_BITS(sb);
2268
2269 /*
2270 * This is the total number of blocks used by GDT including
2271 * the number of reserved blocks for GDT.
2272	 * The s_group_info array is allocated with this value
2273	 * to allow a clean online resize without complex
2274	 * pointer manipulation.
2275	 * The drawback is the unused memory when no resize
2276	 * occurs, but it's very low in terms of pages
2277 * (see comments below)
2278 * Need to handle this properly when META_BG resizing is allowed
2279 */
2280 num_meta_group_infos_max = num_meta_group_infos +
2281 le16_to_cpu(es->s_reserved_gdt_blocks);
2282
2283	 * array_size is the size of the s_group_info array. We round it
2284	 * up to the next power of two because the same rounding is done
2285	 * internally by kmalloc, so we get some extra memory
2286	 * for free here (e.g. it may be used for a META_BG resize).
2287 * for free here (e.g. may be used for META_BG resize).
2288 */
2289 array_size = 1;
2290 while (array_size < sizeof(*sbi->s_group_info) *
2291 num_meta_group_infos_max)
2292 array_size = array_size << 1;
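	/*
	 * Worked example (illustrative): with 8-byte pointers and
	 * num_meta_group_infos_max == 100, the target is 800 bytes and
	 * the loop leaves array_size == 1024, the next power of two.
	 */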
Alex Tomasc9de5602008-01-29 00:19:52 -05002293 /* An 8TB filesystem with 64-bit pointers requires a 4096 byte
2294 * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
2295 * So a two level scheme suffices for now. */
Theodore Ts'of18a5f22011-08-01 08:45:38 -04002296 sbi->s_group_info = ext4_kvzalloc(array_size, GFP_KERNEL);
Alex Tomasc9de5602008-01-29 00:19:52 -05002297 if (sbi->s_group_info == NULL) {
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04002298 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
Alex Tomasc9de5602008-01-29 00:19:52 -05002299 return -ENOMEM;
2300 }
2301 sbi->s_buddy_cache = new_inode(sb);
2302 if (sbi->s_buddy_cache == NULL) {
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04002303 ext4_msg(sb, KERN_ERR, "can't get new inode");
Alex Tomasc9de5602008-01-29 00:19:52 -05002304 goto err_freesgi;
2305 }
Yu Jian48e60612011-08-01 17:41:39 -04002306	/* To avoid potentially colliding with a valid on-disk inode number,
2307 * use EXT4_BAD_INO for the buddy cache inode number. This inode is
2308 * not in the inode hash, so it should never be found by iget(), but
2309 * this will avoid confusion if it ever shows up during debugging. */
2310 sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
Alex Tomasc9de5602008-01-29 00:19:52 -05002311 EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
Theodore Ts'o8df96752009-05-01 08:50:38 -04002312 for (i = 0; i < ngroups; i++) {
Alex Tomasc9de5602008-01-29 00:19:52 -05002313 desc = ext4_get_group_desc(sb, i, NULL);
2314 if (desc == NULL) {
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04002315 ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
Alex Tomasc9de5602008-01-29 00:19:52 -05002316 goto err_freebuddy;
2317 }
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002318 if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
2319 goto err_freebuddy;
Alex Tomasc9de5602008-01-29 00:19:52 -05002320 }
2321
2322 return 0;
2323
2324err_freebuddy:
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002325 cachep = get_groupinfo_cache(sb->s_blocksize_bits);
Roel Kluinf1fa3342008-04-29 22:01:15 -04002326 while (i-- > 0)
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002327 kmem_cache_free(cachep, ext4_get_group_info(sb, i));
Alex Tomasc9de5602008-01-29 00:19:52 -05002328 i = num_meta_group_infos;
Roel Kluinf1fa3342008-04-29 22:01:15 -04002329 while (i-- > 0)
Alex Tomasc9de5602008-01-29 00:19:52 -05002330 kfree(sbi->s_group_info[i]);
2331 iput(sbi->s_buddy_cache);
2332err_freesgi:
Theodore Ts'of18a5f22011-08-01 08:45:38 -04002333 ext4_kvfree(sbi->s_group_info);
Alex Tomasc9de5602008-01-29 00:19:52 -05002334 return -ENOMEM;
2335}
2336
Eric Sandeen2892c152011-02-12 08:12:18 -05002337static void ext4_groupinfo_destroy_slabs(void)
2338{
2339 int i;
2340
2341 for (i = 0; i < NR_GRPINFO_CACHES; i++) {
2342 if (ext4_groupinfo_caches[i])
2343 kmem_cache_destroy(ext4_groupinfo_caches[i]);
2344 ext4_groupinfo_caches[i] = NULL;
2345 }
2346}
2347
2348static int ext4_groupinfo_create_slab(size_t size)
2349{
2350 static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
2351 int slab_size;
2352 int blocksize_bits = order_base_2(size);
2353 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2354 struct kmem_cache *cachep;
2355
2356 if (cache_index >= NR_GRPINFO_CACHES)
2357 return -EINVAL;
2358
2359 if (unlikely(cache_index < 0))
2360 cache_index = 0;
2361
2362 mutex_lock(&ext4_grpinfo_slab_create_mutex);
2363 if (ext4_groupinfo_caches[cache_index]) {
2364 mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2365 return 0; /* Already created */
2366 }
2367
2368 slab_size = offsetof(struct ext4_group_info,
2369 bb_counters[blocksize_bits + 2]);
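	/*
	 * Illustrative note: bb_counters[] is a per-order array, so the
	 * offsetof() above sizes each slab object to hold counters for
	 * orders 0 through blocksize_bits + 1 for this block size.
	 */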
2370
2371 cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
2372 slab_size, 0, SLAB_RECLAIM_ACCOUNT,
2373 NULL);
2374
Tao Ma823ba012011-07-11 18:26:01 -04002375 ext4_groupinfo_caches[cache_index] = cachep;
2376
Eric Sandeen2892c152011-02-12 08:12:18 -05002377 mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2378 if (!cachep) {
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04002379 printk(KERN_EMERG
2380 "EXT4-fs: no memory for groupinfo slab cache\n");
Eric Sandeen2892c152011-02-12 08:12:18 -05002381 return -ENOMEM;
2382 }
2383
Eric Sandeen2892c152011-02-12 08:12:18 -05002384 return 0;
2385}
2386
Akira Fujita9d990122012-05-28 14:19:25 -04002387int ext4_mb_init(struct super_block *sb)
Alex Tomasc9de5602008-01-29 00:19:52 -05002388{
2389 struct ext4_sb_info *sbi = EXT4_SB(sb);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04002390 unsigned i, j;
Alex Tomasc9de5602008-01-29 00:19:52 -05002391 unsigned offset;
2392 unsigned max;
Shen Feng74767c52008-07-11 19:27:31 -04002393 int ret;
Alex Tomasc9de5602008-01-29 00:19:52 -05002394
Eric Sandeen19278052009-08-25 22:36:25 -04002395 i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets);
Alex Tomasc9de5602008-01-29 00:19:52 -05002396
2397 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
2398 if (sbi->s_mb_offsets == NULL) {
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002399 ret = -ENOMEM;
2400 goto out;
Alex Tomasc9de5602008-01-29 00:19:52 -05002401 }
Yasunori Gotoff7ef322008-12-17 00:48:39 -05002402
Eric Sandeen19278052009-08-25 22:36:25 -04002403 i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs);
Alex Tomasc9de5602008-01-29 00:19:52 -05002404 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
2405 if (sbi->s_mb_maxs == NULL) {
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002406 ret = -ENOMEM;
2407 goto out;
2408 }
2409
Eric Sandeen2892c152011-02-12 08:12:18 -05002410 ret = ext4_groupinfo_create_slab(sb->s_blocksize);
2411 if (ret < 0)
2412 goto out;
Alex Tomasc9de5602008-01-29 00:19:52 -05002413
2414 /* order 0 is regular bitmap */
2415 sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
2416 sbi->s_mb_offsets[0] = 0;
2417
2418 i = 1;
2419 offset = 0;
2420 max = sb->s_blocksize << 2;
2421 do {
2422 sbi->s_mb_offsets[i] = offset;
2423 sbi->s_mb_maxs[i] = max;
2424 offset += 1 << (sb->s_blocksize_bits - i);
2425 max = max >> 1;
2426 i++;
2427 } while (i <= sb->s_blocksize_bits + 1);
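	/*
	 * Worked example (illustrative), 4k blocks (s_blocksize_bits == 12):
	 * order 0 is the 32768-bit block bitmap itself; within the buddy
	 * block, order 1 starts at byte offset 0 with 16384 bits, order 2
	 * at byte 2048 with 8192 bits, order 3 at byte 3072 with 4096
	 * bits, and so on, each level half the size of the previous one.
	 */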
2428
Alex Tomasc9de5602008-01-29 00:19:52 -05002429 spin_lock_init(&sbi->s_md_lock);
Alex Tomasc9de5602008-01-29 00:19:52 -05002430 spin_lock_init(&sbi->s_bal_lock);
2431
2432 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
2433 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
2434 sbi->s_mb_stats = MB_DEFAULT_STATS;
2435 sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
2436 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
Theodore Ts'o27baebb2011-09-09 19:02:51 -04002437 /*
2438 * The default group preallocation is 512, which for 4k block
2439 * sizes translates to 2 megabytes. However for bigalloc file
2440	 * systems, this is probably too big (i.e., if the cluster size
2441	 * is 1 megabyte, then the group preallocation size becomes half a
2442	 * gigabyte!). As a default, we will keep a two megabyte
2443	 * group prealloc size for cluster sizes up to 64k, and after
2444 * that, we will force a minimum group preallocation size of
2445 * 32 clusters. This translates to 8 megs when the cluster
2446 * size is 256k, and 32 megs when the cluster size is 1 meg,
2447 * which seems reasonable as a default.
2448 */
2449 sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
2450 sbi->s_cluster_bits, 32);
Dan Ehrenbergd7a1fee2011-07-17 21:11:30 -04002451 /*
2452 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
2453 * to the lowest multiple of s_stripe which is bigger than
2454 * the s_mb_group_prealloc as determined above. We want
2455 * the preallocation size to be an exact multiple of the
2456 * RAID stripe size so that preallocations don't fragment
2457 * the stripes.
2458 */
2459 if (sbi->s_stripe > 1) {
2460 sbi->s_mb_group_prealloc = roundup(
2461 sbi->s_mb_group_prealloc, sbi->s_stripe);
2462 }
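	/*
	 * Worked example (illustrative): with s_stripe == 24 and the
	 * 512-cluster default above, roundup(512, 24) == 528, the lowest
	 * stripe multiple that is >= 512.
	 */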
Alex Tomasc9de5602008-01-29 00:19:52 -05002463
Eric Sandeen730c2132008-09-13 15:23:29 -04002464 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
Alex Tomasc9de5602008-01-29 00:19:52 -05002465 if (sbi->s_locality_groups == NULL) {
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002466 ret = -ENOMEM;
Tao Ma7aa0bae2011-10-06 10:22:28 -04002467 goto out_free_groupinfo_slab;
Alex Tomasc9de5602008-01-29 00:19:52 -05002468 }
Eric Sandeen730c2132008-09-13 15:23:29 -04002469 for_each_possible_cpu(i) {
Alex Tomasc9de5602008-01-29 00:19:52 -05002470 struct ext4_locality_group *lg;
Eric Sandeen730c2132008-09-13 15:23:29 -04002471 lg = per_cpu_ptr(sbi->s_locality_groups, i);
Alex Tomasc9de5602008-01-29 00:19:52 -05002472 mutex_init(&lg->lg_mutex);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04002473 for (j = 0; j < PREALLOC_TB_SIZE; j++)
2474 INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
Alex Tomasc9de5602008-01-29 00:19:52 -05002475 spin_lock_init(&lg->lg_prealloc_lock);
2476 }
2477
Yu Jian79a77c52011-08-01 17:41:46 -04002478 /* init file for buddy data */
2479 ret = ext4_mb_init_backend(sb);
Tao Ma7aa0bae2011-10-06 10:22:28 -04002480 if (ret != 0)
2481 goto out_free_locality_groups;
Yu Jian79a77c52011-08-01 17:41:46 -04002482
Theodore Ts'o296c3552009-09-30 00:32:42 -04002483 if (sbi->s_proc)
2484 proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
2485 &ext4_mb_seq_groups_fops, sb);
Alex Tomasc9de5602008-01-29 00:19:52 -05002486
Tao Ma7aa0bae2011-10-06 10:22:28 -04002487 return 0;
2488
2489out_free_locality_groups:
2490 free_percpu(sbi->s_locality_groups);
2491 sbi->s_locality_groups = NULL;
2492out_free_groupinfo_slab:
2493 ext4_groupinfo_destroy_slabs();
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002494out:
Tao Ma7aa0bae2011-10-06 10:22:28 -04002495 kfree(sbi->s_mb_offsets);
2496 sbi->s_mb_offsets = NULL;
2497 kfree(sbi->s_mb_maxs);
2498 sbi->s_mb_maxs = NULL;
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002499 return ret;
Alex Tomasc9de5602008-01-29 00:19:52 -05002500}
2501
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04002502/* needs to be called with the ext4 group lock held */
Alex Tomasc9de5602008-01-29 00:19:52 -05002503static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
2504{
2505 struct ext4_prealloc_space *pa;
2506 struct list_head *cur, *tmp;
2507 int count = 0;
2508
2509 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
2510 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
2511 list_del(&pa->pa_group_list);
2512 count++;
Aneesh Kumar K.V688f05a2008-10-13 12:14:14 -04002513 kmem_cache_free(ext4_pspace_cachep, pa);
Alex Tomasc9de5602008-01-29 00:19:52 -05002514 }
2515 if (count)
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04002516 mb_debug(1, "mballoc: %u PAs left\n", count);
Alex Tomasc9de5602008-01-29 00:19:52 -05002517
2518}
2519
2520int ext4_mb_release(struct super_block *sb)
2521{
Theodore Ts'o8df96752009-05-01 08:50:38 -04002522 ext4_group_t ngroups = ext4_get_groups_count(sb);
Alex Tomasc9de5602008-01-29 00:19:52 -05002523 ext4_group_t i;
2524 int num_meta_group_infos;
2525 struct ext4_group_info *grinfo;
2526 struct ext4_sb_info *sbi = EXT4_SB(sb);
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002527 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
Alex Tomasc9de5602008-01-29 00:19:52 -05002528
Salman Qazi95599962012-05-31 23:52:14 -04002529 if (sbi->s_proc)
2530 remove_proc_entry("mb_groups", sbi->s_proc);
2531
Alex Tomasc9de5602008-01-29 00:19:52 -05002532 if (sbi->s_group_info) {
Theodore Ts'o8df96752009-05-01 08:50:38 -04002533 for (i = 0; i < ngroups; i++) {
Alex Tomasc9de5602008-01-29 00:19:52 -05002534 grinfo = ext4_get_group_info(sb, i);
2535#ifdef DOUBLE_CHECK
2536 kfree(grinfo->bb_bitmap);
2537#endif
2538 ext4_lock_group(sb, i);
2539 ext4_mb_cleanup_pa(grinfo);
2540 ext4_unlock_group(sb, i);
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002541 kmem_cache_free(cachep, grinfo);
Alex Tomasc9de5602008-01-29 00:19:52 -05002542 }
Theodore Ts'o8df96752009-05-01 08:50:38 -04002543 num_meta_group_infos = (ngroups +
Alex Tomasc9de5602008-01-29 00:19:52 -05002544 EXT4_DESC_PER_BLOCK(sb) - 1) >>
2545 EXT4_DESC_PER_BLOCK_BITS(sb);
2546 for (i = 0; i < num_meta_group_infos; i++)
2547 kfree(sbi->s_group_info[i]);
Theodore Ts'of18a5f22011-08-01 08:45:38 -04002548 ext4_kvfree(sbi->s_group_info);
Alex Tomasc9de5602008-01-29 00:19:52 -05002549 }
2550 kfree(sbi->s_mb_offsets);
2551 kfree(sbi->s_mb_maxs);
2552 if (sbi->s_buddy_cache)
2553 iput(sbi->s_buddy_cache);
2554 if (sbi->s_mb_stats) {
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04002555 ext4_msg(sb, KERN_INFO,
2556 "mballoc: %u blocks %u reqs (%u success)",
Alex Tomasc9de5602008-01-29 00:19:52 -05002557 atomic_read(&sbi->s_bal_allocated),
2558 atomic_read(&sbi->s_bal_reqs),
2559 atomic_read(&sbi->s_bal_success));
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04002560 ext4_msg(sb, KERN_INFO,
2561 "mballoc: %u extents scanned, %u goal hits, "
2562 "%u 2^N hits, %u breaks, %u lost",
Alex Tomasc9de5602008-01-29 00:19:52 -05002563 atomic_read(&sbi->s_bal_ex_scanned),
2564 atomic_read(&sbi->s_bal_goals),
2565 atomic_read(&sbi->s_bal_2orders),
2566 atomic_read(&sbi->s_bal_breaks),
2567 atomic_read(&sbi->s_mb_lost_chunks));
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04002568 ext4_msg(sb, KERN_INFO,
2569 "mballoc: %lu generated and it took %Lu",
Tao Maced156e2011-07-23 16:18:05 -04002570 sbi->s_mb_buddies_generated,
Alex Tomasc9de5602008-01-29 00:19:52 -05002571 sbi->s_mb_generation_time);
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04002572 ext4_msg(sb, KERN_INFO,
2573 "mballoc: %u preallocated, %u discarded",
Alex Tomasc9de5602008-01-29 00:19:52 -05002574 atomic_read(&sbi->s_mb_preallocated),
2575 atomic_read(&sbi->s_mb_discarded));
2576 }
2577
Eric Sandeen730c2132008-09-13 15:23:29 -04002578 free_percpu(sbi->s_locality_groups);
Alex Tomasc9de5602008-01-29 00:19:52 -05002579
2580 return 0;
2581}
2582
Lukas Czerner77ca6cd2010-10-27 21:30:11 -04002583static inline int ext4_issue_discard(struct super_block *sb,
Theodore Ts'o84130192011-09-09 18:50:51 -04002584 ext4_group_t block_group, ext4_grpblk_t cluster, int count)
Jiaying Zhang5c521832010-07-27 11:56:05 -04002585{
Jiaying Zhang5c521832010-07-27 11:56:05 -04002586 ext4_fsblk_t discard_block;
2587
Theodore Ts'o84130192011-09-09 18:50:51 -04002588 discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
2589 ext4_group_first_block_no(sb, block_group));
2590 count = EXT4_C2B(EXT4_SB(sb), count);
Jiaying Zhang5c521832010-07-27 11:56:05 -04002591 trace_ext4_discard_blocks(sb,
2592 (unsigned long long) discard_block, count);
Lukas Czerner93259632011-01-10 12:09:59 -05002593 return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
Jiaying Zhang5c521832010-07-27 11:56:05 -04002594}
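/*
 * Illustrative sketch, not part of mballoc: the cluster-to-block
 * conversion used above. On a bigalloc file system EXT4_C2B() scales a
 * cluster count by 2^s_cluster_bits; the group-relative result is then
 * offset by the group's first block. 'cluster_bits' is a hypothetical
 * stand-in for sbi->s_cluster_bits.
 */
static inline unsigned long long
sketch_cluster_to_block(unsigned long long group_first_block,
			unsigned int cluster, unsigned int cluster_bits)
{
	return group_first_block +
		((unsigned long long)cluster << cluster_bits);
}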
2595
Theodore Ts'o3e624fc2008-10-16 20:00:24 -04002596/*
2597 * This function is called by the jbd2 layer once the commit has finished,
2598 * so we know we can free the blocks that were released with that commit.
2599 */
Bobi Jam18aadd42012-02-20 17:53:02 -05002600static void ext4_free_data_callback(struct super_block *sb,
2601 struct ext4_journal_cb_entry *jce,
2602 int rc)
Alex Tomasc9de5602008-01-29 00:19:52 -05002603{
Bobi Jam18aadd42012-02-20 17:53:02 -05002604 struct ext4_free_data *entry = (struct ext4_free_data *)jce;
Alex Tomasc9de5602008-01-29 00:19:52 -05002605 struct ext4_buddy e4b;
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04002606 struct ext4_group_info *db;
Theodore Ts'od9f34502011-04-30 13:47:24 -04002607 int err, count = 0, count2 = 0;
Alex Tomasc9de5602008-01-29 00:19:52 -05002608
Bobi Jam18aadd42012-02-20 17:53:02 -05002609 mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
2610 entry->efd_count, entry->efd_group, entry);
Alex Tomasc9de5602008-01-29 00:19:52 -05002611
Bobi Jam18aadd42012-02-20 17:53:02 -05002612 if (test_opt(sb, DISCARD))
2613 ext4_issue_discard(sb, entry->efd_group,
2614 entry->efd_start_cluster, entry->efd_count);
Alex Tomasc9de5602008-01-29 00:19:52 -05002615
Bobi Jam18aadd42012-02-20 17:53:02 -05002616 err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
2617	/* we expect to find the existing buddy because it's pinned */
2618 BUG_ON(err != 0);
Theodore Ts'ob90f6872010-04-20 16:51:59 -04002619
Alex Tomasc9de5602008-01-29 00:19:52 -05002620
Bobi Jam18aadd42012-02-20 17:53:02 -05002621 db = e4b.bd_info;
2622	/* there are blocks to put back into the buddy to make them really free */
2623 count += entry->efd_count;
2624 count2++;
2625 ext4_lock_group(sb, entry->efd_group);
2626 /* Take it out of per group rb tree */
2627 rb_erase(&entry->efd_node, &(db->bb_free_root));
2628 mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04002629
Bobi Jam18aadd42012-02-20 17:53:02 -05002630 /*
2631 * Clear the trimmed flag for the group so that the next
2632 * ext4_trim_fs can trim it.
2633 * If the volume is mounted with -o discard, online discard
2634 * is supported and the free blocks will be trimmed online.
2635 */
2636 if (!test_opt(sb, DISCARD))
2637 EXT4_MB_GRP_CLEAR_TRIMMED(db);
2638
2639 if (!db->bb_free_root.rb_node) {
2640		/* No more items in the per-group rb tree;
2641		 * balance refcounts from ext4_mb_free_metadata()
Tao Ma3d56b8d2011-07-11 00:03:38 -04002642 */
Bobi Jam18aadd42012-02-20 17:53:02 -05002643 page_cache_release(e4b.bd_buddy_page);
2644 page_cache_release(e4b.bd_bitmap_page);
Theodore Ts'o3e624fc2008-10-16 20:00:24 -04002645 }
Bobi Jam18aadd42012-02-20 17:53:02 -05002646 ext4_unlock_group(sb, entry->efd_group);
2647 kmem_cache_free(ext4_free_data_cachep, entry);
2648 ext4_mb_unload_buddy(&e4b);
Alex Tomasc9de5602008-01-29 00:19:52 -05002649
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04002650 mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
Alex Tomasc9de5602008-01-29 00:19:52 -05002651}
2652
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04002653#ifdef CONFIG_EXT4_DEBUG
2654u8 mb_enable_debug __read_mostly;
2655
2656static struct dentry *debugfs_dir;
2657static struct dentry *debugfs_debug;
2658
2659static void __init ext4_create_debugfs_entry(void)
2660{
2661 debugfs_dir = debugfs_create_dir("ext4", NULL);
2662 if (debugfs_dir)
2663 debugfs_debug = debugfs_create_u8("mballoc-debug",
2664 S_IRUGO | S_IWUSR,
2665 debugfs_dir,
2666 &mb_enable_debug);
2667}
2668
2669static void ext4_remove_debugfs_entry(void)
2670{
2671 debugfs_remove(debugfs_debug);
2672 debugfs_remove(debugfs_dir);
2673}
2674
2675#else
2676
2677static void __init ext4_create_debugfs_entry(void)
2678{
2679}
2680
2681static void ext4_remove_debugfs_entry(void)
2682{
2683}
2684
2685#endif
2686
Theodore Ts'o5dabfc72010-10-27 21:30:14 -04002687int __init ext4_init_mballoc(void)
Alex Tomasc9de5602008-01-29 00:19:52 -05002688{
Theodore Ts'o16828082010-10-27 21:30:09 -04002689 ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
2690 SLAB_RECLAIM_ACCOUNT);
Alex Tomasc9de5602008-01-29 00:19:52 -05002691 if (ext4_pspace_cachep == NULL)
2692 return -ENOMEM;
2693
Theodore Ts'o16828082010-10-27 21:30:09 -04002694 ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
2695 SLAB_RECLAIM_ACCOUNT);
Eric Sandeen256bdb42008-02-10 01:13:33 -05002696 if (ext4_ac_cachep == NULL) {
2697 kmem_cache_destroy(ext4_pspace_cachep);
2698 return -ENOMEM;
2699 }
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04002700
Bobi Jam18aadd42012-02-20 17:53:02 -05002701 ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
2702 SLAB_RECLAIM_ACCOUNT);
2703 if (ext4_free_data_cachep == NULL) {
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04002704 kmem_cache_destroy(ext4_pspace_cachep);
2705 kmem_cache_destroy(ext4_ac_cachep);
2706 return -ENOMEM;
2707 }
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04002708 ext4_create_debugfs_entry();
Alex Tomasc9de5602008-01-29 00:19:52 -05002709 return 0;
2710}
2711
Theodore Ts'o5dabfc72010-10-27 21:30:14 -04002712void ext4_exit_mballoc(void)
Alex Tomasc9de5602008-01-29 00:19:52 -05002713{
Theodore Ts'o60e66792010-05-17 07:00:00 -04002714 /*
Jesper Dangaard Brouer3e03f9c2009-07-05 22:29:27 -04002715 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
2716 * before destroying the slab cache.
2717 */
2718 rcu_barrier();
Alex Tomasc9de5602008-01-29 00:19:52 -05002719 kmem_cache_destroy(ext4_pspace_cachep);
Eric Sandeen256bdb42008-02-10 01:13:33 -05002720 kmem_cache_destroy(ext4_ac_cachep);
Bobi Jam18aadd42012-02-20 17:53:02 -05002721 kmem_cache_destroy(ext4_free_data_cachep);
Eric Sandeen2892c152011-02-12 08:12:18 -05002722 ext4_groupinfo_destroy_slabs();
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04002723 ext4_remove_debugfs_entry();
Alex Tomasc9de5602008-01-29 00:19:52 -05002724}
2725
2726
2727/*
Uwe Kleine-König73b2c712010-07-30 21:02:47 +02002728 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
Alex Tomasc9de5602008-01-29 00:19:52 -05002729 * Returns 0 if success or error code
2730 */
Eric Sandeen4ddfef72008-04-29 08:11:12 -04002731static noinline_for_stack int
2732ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
Theodore Ts'o53accfa2011-09-09 18:48:51 -04002733 handle_t *handle, unsigned int reserv_clstrs)
Alex Tomasc9de5602008-01-29 00:19:52 -05002734{
2735 struct buffer_head *bitmap_bh = NULL;
Alex Tomasc9de5602008-01-29 00:19:52 -05002736 struct ext4_group_desc *gdp;
2737 struct buffer_head *gdp_bh;
2738 struct ext4_sb_info *sbi;
2739 struct super_block *sb;
2740 ext4_fsblk_t block;
Aneesh Kumar K.V519deca2008-05-15 14:43:20 -04002741 int err, len;
Alex Tomasc9de5602008-01-29 00:19:52 -05002742
2743 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
2744 BUG_ON(ac->ac_b_ex.fe_len <= 0);
2745
2746 sb = ac->ac_sb;
2747 sbi = EXT4_SB(sb);
Alex Tomasc9de5602008-01-29 00:19:52 -05002748
2749 err = -EIO;
Theodore Ts'o574ca172008-07-11 19:27:31 -04002750 bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
Alex Tomasc9de5602008-01-29 00:19:52 -05002751 if (!bitmap_bh)
2752 goto out_err;
2753
2754 err = ext4_journal_get_write_access(handle, bitmap_bh);
2755 if (err)
2756 goto out_err;
2757
2758 err = -EIO;
2759 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
2760 if (!gdp)
2761 goto out_err;
2762
Theodore Ts'oa9df9a42009-01-05 22:18:16 -05002763 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
Theodore Ts'o021b65b2011-09-09 19:08:51 -04002764 ext4_free_group_clusters(sb, gdp));
Aneesh Kumar K.V03cddb82008-06-05 20:59:29 -04002765
Alex Tomasc9de5602008-01-29 00:19:52 -05002766 err = ext4_journal_get_write_access(handle, gdp_bh);
2767 if (err)
2768 goto out_err;
2769
Akinobu Mitabda00de2010-03-03 23:53:25 -05002770 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
Alex Tomasc9de5602008-01-29 00:19:52 -05002771
Theodore Ts'o53accfa2011-09-09 18:48:51 -04002772 len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
Theodore Ts'o6fd058f2009-05-17 15:38:01 -04002773 if (!ext4_data_block_valid(sbi, block, len)) {
Eric Sandeen12062dd2010-02-15 14:19:27 -05002774 ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
Theodore Ts'o1084f252012-03-19 23:13:43 -04002775 "fs metadata", block, block+len);
Aneesh Kumar K.V519deca2008-05-15 14:43:20 -04002776		/* The file system is mounted not to panic on error,
2777		 * so fix the bitmap and repeat the block allocation.
2778		 * We leak some of the blocks here.
2779		 */
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04002780 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
Yongqiang Yangc3e94d12011-07-26 22:05:53 -04002781 ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
2782 ac->ac_b_ex.fe_len);
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04002783 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
Frank Mayhar03901312009-01-07 00:06:22 -05002784 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
Aneesh Kumar K.V519deca2008-05-15 14:43:20 -04002785 if (!err)
2786 err = -EAGAIN;
2787 goto out_err;
Alex Tomasc9de5602008-01-29 00:19:52 -05002788 }
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04002789
2790 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
Alex Tomasc9de5602008-01-29 00:19:52 -05002791#ifdef AGGRESSIVE_CHECK
2792 {
2793 int i;
2794 for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
2795 BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
2796 bitmap_bh->b_data));
2797 }
2798 }
2799#endif
Yongqiang Yangc3e94d12011-07-26 22:05:53 -04002800 ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
2801 ac->ac_b_ex.fe_len);
Alex Tomasc9de5602008-01-29 00:19:52 -05002802 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2803 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
Theodore Ts'o021b65b2011-09-09 19:08:51 -04002804 ext4_free_group_clusters_set(sb, gdp,
Theodore Ts'ocff1dfd2011-09-09 19:12:51 -04002805 ext4_free_clusters_after_init(sb,
Theodore Ts'o021b65b2011-09-09 19:08:51 -04002806 ac->ac_b_ex.fe_group, gdp));
Alex Tomasc9de5602008-01-29 00:19:52 -05002807 }
Theodore Ts'o021b65b2011-09-09 19:08:51 -04002808 len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
2809 ext4_free_group_clusters_set(sb, gdp, len);
Darrick J. Wongfa77dcf2012-04-29 18:35:10 -04002810 ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh,
2811 EXT4_BLOCKS_PER_GROUP(sb) / 8);
Darrick J. Wongfeb0ab32012-04-29 18:45:10 -04002812 ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04002813
2814 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
Theodore Ts'o57042652011-09-09 18:56:51 -04002815 percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
Mingming Caod2a17632008-07-14 17:52:37 -04002816 /*
Aneesh Kumar K.V6bc6e632008-10-10 09:39:00 -04002817	 * Now reduce the dirty block count, too; it should not go negative
Mingming Caod2a17632008-07-14 17:52:37 -04002818 */
Aneesh Kumar K.V6bc6e632008-10-10 09:39:00 -04002819 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
2820		/* release all the reserved blocks if not delalloc */
Theodore Ts'o57042652011-09-09 18:56:51 -04002821 percpu_counter_sub(&sbi->s_dirtyclusters_counter,
2822 reserv_clstrs);
Alex Tomasc9de5602008-01-29 00:19:52 -05002823
Jose R. Santos772cb7c2008-07-11 19:27:31 -04002824 if (sbi->s_log_groups_per_flex) {
2825 ext4_group_t flex_group = ext4_flex_group(sbi,
2826 ac->ac_b_ex.fe_group);
Theodore Ts'o9f24e422009-03-04 19:09:10 -05002827 atomic_sub(ac->ac_b_ex.fe_len,
Theodore Ts'o24aaa8e2011-09-09 18:58:51 -04002828 &sbi->s_flex_groups[flex_group].free_clusters);
Jose R. Santos772cb7c2008-07-11 19:27:31 -04002829 }
2830
Frank Mayhar03901312009-01-07 00:06:22 -05002831 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
Alex Tomasc9de5602008-01-29 00:19:52 -05002832 if (err)
2833 goto out_err;
Frank Mayhar03901312009-01-07 00:06:22 -05002834 err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
Alex Tomasc9de5602008-01-29 00:19:52 -05002835
2836out_err:
Aneesh Kumar K.V42a10ad2008-02-10 01:07:28 -05002837 brelse(bitmap_bh);
Alex Tomasc9de5602008-01-29 00:19:52 -05002838 return err;
2839}
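/*
 * Worked example of the cluster/block conversion used above (a sketch,
 * assuming a hypothetical bigalloc layout with s_cluster_bits == 4,
 * i.e. 16 blocks per cluster):
 *
 *	len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
 *	// fe_len == 3 clusters  =>  len == 3 << 4 == 48 blocks
 *
 * On a non-bigalloc filesystem s_cluster_bits is 0, so EXT4_C2B() is an
 * identity and the validity check covers exactly the allocated blocks.
 */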
2840
2841/*
2842 * here we normalize the request for a locality group
Dan Ehrenbergd7a1fee2011-07-17 21:11:30 -04002843 * Group requests are normalized to s_mb_group_prealloc, which is set
2844 * from the stripe size if one was given via the mount option.
2845 * s_mb_group_prealloc can be configured via
Theodore Ts'ob713a5e2009-03-31 09:11:14 -04002846 * /sys/fs/ext4/<partition>/mb_group_prealloc
Alex Tomasc9de5602008-01-29 00:19:52 -05002847 *
2848 * XXX: should we try to preallocate more than the group has now?
2849 */
2850static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
2851{
2852 struct super_block *sb = ac->ac_sb;
2853 struct ext4_locality_group *lg = ac->ac_lg;
2854
2855 BUG_ON(lg == NULL);
Dan Ehrenbergd7a1fee2011-07-17 21:11:30 -04002856 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04002857 mb_debug(1, "#%u: goal %u blocks for locality group\n",
Alex Tomasc9de5602008-01-29 00:19:52 -05002858 current->pid, ac->ac_g_ex.fe_len);
2859}
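/*
 * For example, assuming a hypothetical s_mb_group_prealloc of 512
 * blocks, an 8-block locality-group request is normalized up to
 * fe_len == 512 here; the unused remainder stays in the group PA and
 * serves later small allocations from the same CPU's locality group.
 */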
2860
2861/*
2862 * Normalization means making the request better in terms of
2863 * size and alignment
2864 */
Eric Sandeen4ddfef72008-04-29 08:11:12 -04002865static noinline_for_stack void
2866ext4_mb_normalize_request(struct ext4_allocation_context *ac,
Alex Tomasc9de5602008-01-29 00:19:52 -05002867 struct ext4_allocation_request *ar)
2868{
Theodore Ts'o53accfa2011-09-09 18:48:51 -04002869 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
Alex Tomasc9de5602008-01-29 00:19:52 -05002870 int bsbits, max;
2871 ext4_lblk_t end;
Curt Wohlgemuth1592d2c2012-02-20 17:53:03 -05002872 loff_t size, start_off;
2873 loff_t orig_size __maybe_unused;
Andi Kleen5a0790c2010-06-14 13:28:03 -04002874 ext4_lblk_t start;
Alex Tomasc9de5602008-01-29 00:19:52 -05002875 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
Aneesh Kumar K.V9a0762c2008-04-17 10:38:59 -04002876 struct ext4_prealloc_space *pa;
Alex Tomasc9de5602008-01-29 00:19:52 -05002877
2878	/* only normalize data requests; metadata requests
2879	   do not need preallocation */
2880 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
2881 return;
2882
2883	/* sometimes the caller may want exact blocks */
2884 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2885 return;
2886
2887 /* caller may indicate that preallocation isn't
2888 * required (it's a tail, for example) */
2889 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
2890 return;
2891
2892 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
2893 ext4_mb_normalize_group_request(ac);
2894 return ;
2895 }
2896
2897 bsbits = ac->ac_sb->s_blocksize_bits;
2898
2899	/* first, let's learn the actual file size
2900	 * we would have once the current request is allocated */
Theodore Ts'o53accfa2011-09-09 18:48:51 -04002901 size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
Alex Tomasc9de5602008-01-29 00:19:52 -05002902 size = size << bsbits;
2903 if (size < i_size_read(ac->ac_inode))
2904 size = i_size_read(ac->ac_inode);
Andi Kleen5a0790c2010-06-14 13:28:03 -04002905 orig_size = size;
Alex Tomasc9de5602008-01-29 00:19:52 -05002906
Valerie Clement19304792008-05-13 19:31:14 -04002907 /* max size of free chunks */
2908 max = 2 << bsbits;
Alex Tomasc9de5602008-01-29 00:19:52 -05002909
Valerie Clement19304792008-05-13 19:31:14 -04002910#define NRL_CHECK_SIZE(req, size, max, chunk_size) \
2911 (req <= (size) || max <= (chunk_size))
Alex Tomasc9de5602008-01-29 00:19:52 -05002912
2913 /* first, try to predict filesize */
2914 /* XXX: should this table be tunable? */
2915 start_off = 0;
2916 if (size <= 16 * 1024) {
2917 size = 16 * 1024;
2918 } else if (size <= 32 * 1024) {
2919 size = 32 * 1024;
2920 } else if (size <= 64 * 1024) {
2921 size = 64 * 1024;
2922 } else if (size <= 128 * 1024) {
2923 size = 128 * 1024;
2924 } else if (size <= 256 * 1024) {
2925 size = 256 * 1024;
2926 } else if (size <= 512 * 1024) {
2927 size = 512 * 1024;
2928 } else if (size <= 1024 * 1024) {
2929 size = 1024 * 1024;
Valerie Clement19304792008-05-13 19:31:14 -04002930 } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
Alex Tomasc9de5602008-01-29 00:19:52 -05002931 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
Valerie Clement19304792008-05-13 19:31:14 -04002932 (21 - bsbits)) << 21;
2933 size = 2 * 1024 * 1024;
2934 } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
Alex Tomasc9de5602008-01-29 00:19:52 -05002935 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
2936 (22 - bsbits)) << 22;
2937 size = 4 * 1024 * 1024;
2938 } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
Valerie Clement19304792008-05-13 19:31:14 -04002939 (8<<20)>>bsbits, max, 8 * 1024)) {
Alex Tomasc9de5602008-01-29 00:19:52 -05002940 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
2941 (23 - bsbits)) << 23;
2942 size = 8 * 1024 * 1024;
2943 } else {
2944 start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
2945 size = ac->ac_o_ex.fe_len << bsbits;
2946 }
Andi Kleen5a0790c2010-06-14 13:28:03 -04002947 size = size >> bsbits;
2948 start = start_off >> bsbits;
Alex Tomasc9de5602008-01-29 00:19:52 -05002949
2950 /* don't cover already allocated blocks in selected range */
2951 if (ar->pleft && start <= ar->lleft) {
2952 size -= ar->lleft + 1 - start;
2953 start = ar->lleft + 1;
2954 }
2955 if (ar->pright && start + size - 1 >= ar->lright)
2956 size -= start + size - ar->lright;
2957
2958 end = start + size;
2959
2960 /* check we don't cross already preallocated blocks */
2961 rcu_read_lock();
Aneesh Kumar K.V9a0762c2008-04-17 10:38:59 -04002962 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
Theodore Ts'o498e5f22008-11-05 00:14:04 -05002963 ext4_lblk_t pa_end;
Alex Tomasc9de5602008-01-29 00:19:52 -05002964
Alex Tomasc9de5602008-01-29 00:19:52 -05002965 if (pa->pa_deleted)
2966 continue;
2967 spin_lock(&pa->pa_lock);
2968 if (pa->pa_deleted) {
2969 spin_unlock(&pa->pa_lock);
2970 continue;
2971 }
2972
Theodore Ts'o53accfa2011-09-09 18:48:51 -04002973 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
2974 pa->pa_len);
Alex Tomasc9de5602008-01-29 00:19:52 -05002975
2976 /* PA must not overlap original request */
2977 BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
2978 ac->ac_o_ex.fe_logical < pa->pa_lstart));
2979
Eric Sandeen38877f42009-08-17 23:55:24 -04002980 /* skip PAs this normalized request doesn't overlap with */
2981 if (pa->pa_lstart >= end || pa_end <= start) {
Alex Tomasc9de5602008-01-29 00:19:52 -05002982 spin_unlock(&pa->pa_lock);
2983 continue;
2984 }
2985 BUG_ON(pa->pa_lstart <= start && pa_end >= end);
2986
Eric Sandeen38877f42009-08-17 23:55:24 -04002987 /* adjust start or end to be adjacent to this pa */
Alex Tomasc9de5602008-01-29 00:19:52 -05002988 if (pa_end <= ac->ac_o_ex.fe_logical) {
2989 BUG_ON(pa_end < start);
2990 start = pa_end;
Eric Sandeen38877f42009-08-17 23:55:24 -04002991 } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
Alex Tomasc9de5602008-01-29 00:19:52 -05002992 BUG_ON(pa->pa_lstart > end);
2993 end = pa->pa_lstart;
2994 }
2995 spin_unlock(&pa->pa_lock);
2996 }
2997 rcu_read_unlock();
2998 size = end - start;
2999
3000 /* XXX: extra loop to check we really don't overlap preallocations */
3001 rcu_read_lock();
Aneesh Kumar K.V9a0762c2008-04-17 10:38:59 -04003002 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
Theodore Ts'o498e5f22008-11-05 00:14:04 -05003003 ext4_lblk_t pa_end;
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003004
Alex Tomasc9de5602008-01-29 00:19:52 -05003005 spin_lock(&pa->pa_lock);
3006 if (pa->pa_deleted == 0) {
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003007 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
3008 pa->pa_len);
Alex Tomasc9de5602008-01-29 00:19:52 -05003009 BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
3010 }
3011 spin_unlock(&pa->pa_lock);
3012 }
3013 rcu_read_unlock();
3014
3015 if (start + size <= ac->ac_o_ex.fe_logical &&
3016 start > ac->ac_o_ex.fe_logical) {
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04003017 ext4_msg(ac->ac_sb, KERN_ERR,
3018 "start %lu, size %lu, fe_logical %lu",
3019 (unsigned long) start, (unsigned long) size,
3020 (unsigned long) ac->ac_o_ex.fe_logical);
Alex Tomasc9de5602008-01-29 00:19:52 -05003021 }
3022 BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
3023 start > ac->ac_o_ex.fe_logical);
Theodore Ts'o7137d7a2011-09-09 18:38:51 -04003024 BUG_ON(size <= 0 || size > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
Alex Tomasc9de5602008-01-29 00:19:52 -05003025
3026 /* now prepare goal request */
3027
3028 /* XXX: is it better to align blocks WRT to logical
3029 * placement or satisfy big request as is */
3030 ac->ac_g_ex.fe_logical = start;
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003031 ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
Alex Tomasc9de5602008-01-29 00:19:52 -05003032
3033 /* define goal start in order to merge */
3034 if (ar->pright && (ar->lright == (start + size))) {
3035 /* merge to the right */
3036 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
3037 &ac->ac_f_ex.fe_group,
3038 &ac->ac_f_ex.fe_start);
3039 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3040 }
3041 if (ar->pleft && (ar->lleft + 1 == start)) {
3042 /* merge to the left */
3043 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
3044 &ac->ac_f_ex.fe_group,
3045 &ac->ac_f_ex.fe_start);
3046 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3047 }
3048
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04003049 mb_debug(1, "goal: %u(was %u) blocks at %u\n", (unsigned) size,
Alex Tomasc9de5602008-01-29 00:19:52 -05003050 (unsigned) orig_size, (unsigned) start);
3051}
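/*
 * A worked example of the predicted-size table above (a sketch, assuming
 * 4KiB blocks, i.e. bsbits == 12, and no neighbouring allocations or
 * PAs): a request that would leave the file at 100KiB falls into the
 * "size <= 128 * 1024" bucket, so the goal becomes start == 0 and
 * size == (128 * 1024) >> 12 == 32 blocks. The larger NRL_CHECK_SIZE()
 * buckets additionally align start_off to their chunk size (2MiB, 4MiB
 * or 8MiB).
 */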
3052
3053static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
3054{
3055 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3056
3057 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
3058 atomic_inc(&sbi->s_bal_reqs);
3059 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
Curt Wohlgemuth291dae42010-05-16 16:00:00 -04003060 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
Alex Tomasc9de5602008-01-29 00:19:52 -05003061 atomic_inc(&sbi->s_bal_success);
3062 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
3063 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
3064 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
3065 atomic_inc(&sbi->s_bal_goals);
3066 if (ac->ac_found > sbi->s_mb_max_to_scan)
3067 atomic_inc(&sbi->s_bal_breaks);
3068 }
3069
Theodore Ts'o296c3552009-09-30 00:32:42 -04003070 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
3071 trace_ext4_mballoc_alloc(ac);
3072 else
3073 trace_ext4_mballoc_prealloc(ac);
Alex Tomasc9de5602008-01-29 00:19:52 -05003074}
3075
3076/*
Curt Wohlgemuthb8441672009-12-08 22:18:25 -05003077 * Called on failure; free up any blocks from the inode PA for this
3078 * context. We don't need this for MB_GROUP_PA because we only change
3079 * pa_free in ext4_mb_release_context(), but on failure, we've already
3080 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
3081 */
3082static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
3083{
3084 struct ext4_prealloc_space *pa = ac->ac_pa;
Curt Wohlgemuthb8441672009-12-08 22:18:25 -05003085
Zheng Liu400db9d2012-05-28 17:53:53 -04003086 if (pa && pa->pa_type == MB_INODE_PA)
3087 pa->pa_free += ac->ac_b_ex.fe_len;
Curt Wohlgemuthb8441672009-12-08 22:18:25 -05003088}
3089
3090/*
Alex Tomasc9de5602008-01-29 00:19:52 -05003091 * use blocks preallocated to inode
3092 */
3093static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
3094 struct ext4_prealloc_space *pa)
3095{
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003096 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
Alex Tomasc9de5602008-01-29 00:19:52 -05003097 ext4_fsblk_t start;
3098 ext4_fsblk_t end;
3099 int len;
3100
3101 /* found preallocated blocks, use them */
3102 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003103 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
3104 start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
3105 len = EXT4_NUM_B2C(sbi, end - start);
Alex Tomasc9de5602008-01-29 00:19:52 -05003106 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
3107 &ac->ac_b_ex.fe_start);
3108 ac->ac_b_ex.fe_len = len;
3109 ac->ac_status = AC_STATUS_FOUND;
3110 ac->ac_pa = pa;
3111
3112 BUG_ON(start < pa->pa_pstart);
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003113 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
Alex Tomasc9de5602008-01-29 00:19:52 -05003114 BUG_ON(pa->pa_free < len);
3115 pa->pa_free -= len;
3116
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04003117 mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa);
Alex Tomasc9de5602008-01-29 00:19:52 -05003118}
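/*
 * Arithmetic sketch for the clipping above (assuming a non-bigalloc
 * filesystem, where EXT4_C2B()/EXT4_NUM_B2C() are identities): with
 * pa_lstart == 100, pa_pstart == 5000, pa_len == 16, and a request of
 * fe_logical == 104, fe_len == 8:
 *
 *	start = 5000 + (104 - 100)        = 5004
 *	end   = min(5000 + 16, 5004 + 8)  = 5012
 *	len   = 5012 - 5004               = 8 blocks served from the pa
 */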
3119
3120/*
3121 * use blocks preallocated to locality group
3122 */
3123static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
3124 struct ext4_prealloc_space *pa)
3125{
Aneesh Kumar K.V03cddb82008-06-05 20:59:29 -04003126 unsigned int len = ac->ac_o_ex.fe_len;
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04003127
Alex Tomasc9de5602008-01-29 00:19:52 -05003128 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
3129 &ac->ac_b_ex.fe_group,
3130 &ac->ac_b_ex.fe_start);
3131 ac->ac_b_ex.fe_len = len;
3132 ac->ac_status = AC_STATUS_FOUND;
3133 ac->ac_pa = pa;
3134
3135	/* we don't correct pa_pstart or pa_len here to avoid a
Aneesh Kumar K.V26346ff2008-02-10 01:10:04 -05003136	 * possible race when the group is being loaded concurrently;
Alex Tomasc9de5602008-01-29 00:19:52 -05003137	 * instead we correct the pa later, after blocks are marked
Aneesh Kumar K.V26346ff2008-02-10 01:10:04 -05003138	 * in the on-disk bitmap -- see ext4_mb_release_context().
3139	 * Other CPUs are prevented from allocating from this pa by lg_mutex.
Alex Tomasc9de5602008-01-29 00:19:52 -05003140 */
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04003141 mb_debug(1, "use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa);
Alex Tomasc9de5602008-01-29 00:19:52 -05003142}
3143
3144/*
Aneesh Kumar K.V5e745b02008-08-18 18:00:57 -04003145 * Return the prealloc space that has the minimal distance
3146 * from the goal block. @cpa is the prealloc
3147 * space with the currently known minimal distance
3148 * from the goal block.
3149 */
3150static struct ext4_prealloc_space *
3151ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
3152 struct ext4_prealloc_space *pa,
3153 struct ext4_prealloc_space *cpa)
3154{
3155 ext4_fsblk_t cur_distance, new_distance;
3156
3157 if (cpa == NULL) {
3158 atomic_inc(&pa->pa_count);
3159 return pa;
3160 }
3161 cur_distance = abs(goal_block - cpa->pa_pstart);
3162 new_distance = abs(goal_block - pa->pa_pstart);
3163
Coly Li5a54b2f2011-02-24 14:10:05 -05003164 if (cur_distance <= new_distance)
Aneesh Kumar K.V5e745b02008-08-18 18:00:57 -04003165 return cpa;
3166
3167 /* drop the previous reference */
3168 atomic_dec(&cpa->pa_count);
3169 atomic_inc(&pa->pa_count);
3170 return pa;
3171}
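/*
 * E.g. with goal_block == 1000, cpa->pa_pstart == 980 (distance 20) and
 * pa->pa_pstart == 1010 (distance 10), the new pa wins: the reference
 * taken on cpa earlier is dropped and one is taken on pa instead, so
 * the caller always holds exactly one reference on the returned pa.
 */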
3172
3173/*
Alex Tomasc9de5602008-01-29 00:19:52 -05003174 * search goal blocks in preallocated space
3175 */
Eric Sandeen4ddfef72008-04-29 08:11:12 -04003176static noinline_for_stack int
3177ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
Alex Tomasc9de5602008-01-29 00:19:52 -05003178{
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003179 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04003180 int order, i;
Alex Tomasc9de5602008-01-29 00:19:52 -05003181 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3182 struct ext4_locality_group *lg;
Aneesh Kumar K.V5e745b02008-08-18 18:00:57 -04003183 struct ext4_prealloc_space *pa, *cpa = NULL;
3184 ext4_fsblk_t goal_block;
Alex Tomasc9de5602008-01-29 00:19:52 -05003185
3186 /* only data can be preallocated */
3187 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3188 return 0;
3189
3190 /* first, try per-file preallocation */
3191 rcu_read_lock();
Aneesh Kumar K.V9a0762c2008-04-17 10:38:59 -04003192 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
Alex Tomasc9de5602008-01-29 00:19:52 -05003193
3194 /* all fields in this condition don't change,
3195 * so we can skip locking for them */
3196 if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003197 ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
3198 EXT4_C2B(sbi, pa->pa_len)))
Alex Tomasc9de5602008-01-29 00:19:52 -05003199 continue;
3200
Eric Sandeenfb0a3872009-09-16 14:45:10 -04003201 /* non-extent files can't have physical blocks past 2^32 */
Dmitry Monakhov12e9b892010-05-16 22:00:00 -04003202 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003203 (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
3204 EXT4_MAX_BLOCK_FILE_PHYS))
Eric Sandeenfb0a3872009-09-16 14:45:10 -04003205 continue;
3206
Alex Tomasc9de5602008-01-29 00:19:52 -05003207 /* found preallocated blocks, use them */
3208 spin_lock(&pa->pa_lock);
3209 if (pa->pa_deleted == 0 && pa->pa_free) {
3210 atomic_inc(&pa->pa_count);
3211 ext4_mb_use_inode_pa(ac, pa);
3212 spin_unlock(&pa->pa_lock);
3213 ac->ac_criteria = 10;
3214 rcu_read_unlock();
3215 return 1;
3216 }
3217 spin_unlock(&pa->pa_lock);
3218 }
3219 rcu_read_unlock();
3220
3221 /* can we use group allocation? */
3222 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
3223 return 0;
3224
3225 /* inode may have no locality group for some reason */
3226 lg = ac->ac_lg;
3227 if (lg == NULL)
3228 return 0;
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04003229 order = fls(ac->ac_o_ex.fe_len) - 1;
3230 if (order > PREALLOC_TB_SIZE - 1)
3231 /* The max size of hash table is PREALLOC_TB_SIZE */
3232 order = PREALLOC_TB_SIZE - 1;
Alex Tomasc9de5602008-01-29 00:19:52 -05003233
Akinobu Mitabda00de2010-03-03 23:53:25 -05003234 goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
Aneesh Kumar K.V5e745b02008-08-18 18:00:57 -04003235 /*
3236	 * search for the prealloc space with the
3237	 * minimal distance from the goal block.
3238 */
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04003239 for (i = order; i < PREALLOC_TB_SIZE; i++) {
3240 rcu_read_lock();
3241 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
3242 pa_inode_list) {
3243 spin_lock(&pa->pa_lock);
3244 if (pa->pa_deleted == 0 &&
3245 pa->pa_free >= ac->ac_o_ex.fe_len) {
Aneesh Kumar K.V5e745b02008-08-18 18:00:57 -04003246
3247 cpa = ext4_mb_check_group_pa(goal_block,
3248 pa, cpa);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04003249 }
Alex Tomasc9de5602008-01-29 00:19:52 -05003250 spin_unlock(&pa->pa_lock);
Alex Tomasc9de5602008-01-29 00:19:52 -05003251 }
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04003252 rcu_read_unlock();
Alex Tomasc9de5602008-01-29 00:19:52 -05003253 }
Aneesh Kumar K.V5e745b02008-08-18 18:00:57 -04003254 if (cpa) {
3255 ext4_mb_use_group_pa(ac, cpa);
3256 ac->ac_criteria = 20;
3257 return 1;
3258 }
Alex Tomasc9de5602008-01-29 00:19:52 -05003259 return 0;
3260}
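/*
 * Bucket-selection sketch for the loop above: order == fls(fe_len) - 1,
 * so e.g. a 24-block request gives fls(24) - 1 == 4 and scanning starts
 * at lg_prealloc_list[4]. Assuming the lists are bucketed by the order
 * of pa_free, the lower buckets can only hold PAs too small to satisfy
 * the request, so skipping them is safe.
 */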
3261
3262/*
Aneesh Kumar K.V7a2fcbf2009-01-05 21:36:55 -05003263 * the function goes through all blocks freed in the group
3264 * but not yet committed and marks them used in the in-core bitmap.
3265 * the buddy must be generated from this bitmap
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04003266 * Needs to be called with the ext4 group lock held
Aneesh Kumar K.V7a2fcbf2009-01-05 21:36:55 -05003267 */
3268static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
3269 ext4_group_t group)
3270{
3271 struct rb_node *n;
3272 struct ext4_group_info *grp;
3273 struct ext4_free_data *entry;
3274
3275 grp = ext4_get_group_info(sb, group);
3276 n = rb_first(&(grp->bb_free_root));
3277
3278 while (n) {
Bobi Jam18aadd42012-02-20 17:53:02 -05003279 entry = rb_entry(n, struct ext4_free_data, efd_node);
3280 ext4_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
Aneesh Kumar K.V7a2fcbf2009-01-05 21:36:55 -05003281 n = rb_next(n);
3282 }
3283 return;
3284}
3285
3286/*
Alex Tomasc9de5602008-01-29 00:19:52 -05003287 * the function goes through all preallocations in this group and marks them
3288 * used in the in-core bitmap. the buddy must be generated from this bitmap
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04003289 * Needs to be called with the ext4 group lock held
Alex Tomasc9de5602008-01-29 00:19:52 -05003290 */
Eric Sandeen089ceec2009-07-05 22:17:31 -04003291static noinline_for_stack
3292void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
Alex Tomasc9de5602008-01-29 00:19:52 -05003293 ext4_group_t group)
3294{
3295 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3296 struct ext4_prealloc_space *pa;
3297 struct list_head *cur;
3298 ext4_group_t groupnr;
3299 ext4_grpblk_t start;
3300 int preallocated = 0;
Alex Tomasc9de5602008-01-29 00:19:52 -05003301 int len;
3302
3303	/* all forms of preallocation discard load the group first,
3304	 * so the only competing code is preallocation use.
3305	 * we don't need any locking here.
3306	 * notice we do NOT ignore preallocations with pa_deleted;
3307	 * otherwise we could leave used blocks available for
3308	 * allocation in the buddy when a concurrent ext4_mb_put_pa()
3309	 * is dropping the preallocation
3310 */
3311 list_for_each(cur, &grp->bb_prealloc_list) {
3312 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3313 spin_lock(&pa->pa_lock);
3314 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3315 &groupnr, &start);
3316 len = pa->pa_len;
3317 spin_unlock(&pa->pa_lock);
3318 if (unlikely(len == 0))
3319 continue;
3320 BUG_ON(groupnr != group);
Yongqiang Yangc3e94d12011-07-26 22:05:53 -04003321 ext4_set_bits(bitmap, start, len);
Alex Tomasc9de5602008-01-29 00:19:52 -05003322 preallocated += len;
Alex Tomasc9de5602008-01-29 00:19:52 -05003323 }
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04003324	mb_debug(1, "preallocated %u for group %u\n", preallocated, group);
Alex Tomasc9de5602008-01-29 00:19:52 -05003325}
3326
3327static void ext4_mb_pa_callback(struct rcu_head *head)
3328{
3329 struct ext4_prealloc_space *pa;
3330 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
3331 kmem_cache_free(ext4_pspace_cachep, pa);
3332}
3333
3334/*
3335 * drops a reference to preallocated space descriptor
3336 * if this was the last reference and the space is consumed
3337 */
3338static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
3339 struct super_block *sb, struct ext4_prealloc_space *pa)
3340{
Theodore Ts'oa9df9a42009-01-05 22:18:16 -05003341 ext4_group_t grp;
Eric Sandeend33a1972009-03-16 23:25:40 -04003342 ext4_fsblk_t grp_blk;
Alex Tomasc9de5602008-01-29 00:19:52 -05003343
3344 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
3345 return;
3346
3347 /* in this short window concurrent discard can set pa_deleted */
3348 spin_lock(&pa->pa_lock);
3349 if (pa->pa_deleted == 1) {
3350 spin_unlock(&pa->pa_lock);
3351 return;
3352 }
3353
3354 pa->pa_deleted = 1;
3355 spin_unlock(&pa->pa_lock);
3356
Eric Sandeend33a1972009-03-16 23:25:40 -04003357 grp_blk = pa->pa_pstart;
Theodore Ts'o60e66792010-05-17 07:00:00 -04003358 /*
Aneesh Kumar K.Vcc0fb9a2009-03-27 17:16:58 -04003359 * If doing group-based preallocation, pa_pstart may be in the
3360 * next group when pa is used up
3361 */
3362 if (pa->pa_type == MB_GROUP_PA)
Eric Sandeend33a1972009-03-16 23:25:40 -04003363 grp_blk--;
3364
3365 ext4_get_group_no_and_offset(sb, grp_blk, &grp, NULL);
Alex Tomasc9de5602008-01-29 00:19:52 -05003366
3367 /*
3368 * possible race:
3369 *
3370 * P1 (buddy init) P2 (regular allocation)
3371 * find block B in PA
3372 * copy on-disk bitmap to buddy
3373 * mark B in on-disk bitmap
3374 * drop PA from group
3375 * mark all PAs in buddy
3376 *
3377 * thus, P1 initializes buddy with B available. to prevent this
3378 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
3379 * against that pair
3380 */
3381 ext4_lock_group(sb, grp);
3382 list_del(&pa->pa_group_list);
3383 ext4_unlock_group(sb, grp);
3384
3385 spin_lock(pa->pa_obj_lock);
3386 list_del_rcu(&pa->pa_inode_list);
3387 spin_unlock(pa->pa_obj_lock);
3388
3389 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3390}
3391
3392/*
3393 * creates new preallocated space for given inode
3394 */
Eric Sandeen4ddfef72008-04-29 08:11:12 -04003395static noinline_for_stack int
3396ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
Alex Tomasc9de5602008-01-29 00:19:52 -05003397{
3398 struct super_block *sb = ac->ac_sb;
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003399 struct ext4_sb_info *sbi = EXT4_SB(sb);
Alex Tomasc9de5602008-01-29 00:19:52 -05003400 struct ext4_prealloc_space *pa;
3401 struct ext4_group_info *grp;
3402 struct ext4_inode_info *ei;
3403
3404	/* preallocate only when the found space is larger than requested */
3405 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3406 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3407 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3408
3409 pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3410 if (pa == NULL)
3411 return -ENOMEM;
3412
3413 if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
3414 int winl;
3415 int wins;
3416 int win;
3417 int offs;
3418
3419		/* we can't allocate as much as the normalizer wants,
3420		 * so the found space must get a proper lstart
3421		 * to cover the original request */
3422 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
3423 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
3424
3425		/* we're limited by the original request in that
3426		 * the logical block must be covered anyway;
3427		 * winl is the window we can move our chunk within */
3428 winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
3429
3430 /* also, we should cover whole original request */
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003431 wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len);
Alex Tomasc9de5602008-01-29 00:19:52 -05003432
3433 /* the smallest one defines real window */
3434 win = min(winl, wins);
3435
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003436 offs = ac->ac_o_ex.fe_logical %
3437 EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
Alex Tomasc9de5602008-01-29 00:19:52 -05003438 if (offs && offs < win)
3439 win = offs;
3440
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003441 ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
3442 EXT4_B2C(sbi, win);
Alex Tomasc9de5602008-01-29 00:19:52 -05003443 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
3444 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
3445 }
3446
3447 /* preallocation can change ac_b_ex, thus we store actually
3448 * allocated blocks for history */
3449 ac->ac_f_ex = ac->ac_b_ex;
3450
3451 pa->pa_lstart = ac->ac_b_ex.fe_logical;
3452 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3453 pa->pa_len = ac->ac_b_ex.fe_len;
3454 pa->pa_free = pa->pa_len;
3455 atomic_set(&pa->pa_count, 1);
3456 spin_lock_init(&pa->pa_lock);
Aneesh Kumar K.Vd794bf82009-02-14 10:31:16 -05003457 INIT_LIST_HEAD(&pa->pa_inode_list);
3458 INIT_LIST_HEAD(&pa->pa_group_list);
Alex Tomasc9de5602008-01-29 00:19:52 -05003459 pa->pa_deleted = 0;
Aneesh Kumar K.Vcc0fb9a2009-03-27 17:16:58 -04003460 pa->pa_type = MB_INODE_PA;
Alex Tomasc9de5602008-01-29 00:19:52 -05003461
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04003462 mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa,
Alex Tomasc9de5602008-01-29 00:19:52 -05003463 pa->pa_pstart, pa->pa_len, pa->pa_lstart);
Theodore Ts'o9bffad12009-06-17 11:48:11 -04003464 trace_ext4_mb_new_inode_pa(ac, pa);
Alex Tomasc9de5602008-01-29 00:19:52 -05003465
3466 ext4_mb_use_inode_pa(ac, pa);
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003467 atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
Alex Tomasc9de5602008-01-29 00:19:52 -05003468
3469 ei = EXT4_I(ac->ac_inode);
3470 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3471
3472 pa->pa_obj_lock = &ei->i_prealloc_lock;
3473 pa->pa_inode = ac->ac_inode;
3474
3475 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3476 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3477 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3478
3479 spin_lock(pa->pa_obj_lock);
3480 list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
3481 spin_unlock(pa->pa_obj_lock);
3482
3483 return 0;
3484}
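/*
 * Window sketch for the lstart adjustment above (assuming non-bigalloc,
 * so EXT4_C2B()/EXT4_B2C() are identities): original request at logical
 * 10 for 4 blocks, goal normalized to logical 8 for 16 blocks, but only
 * 12 blocks found:
 *
 *	winl = 10 - 8      = 2    (slack up to the original logical block)
 *	wins = 12 - 4      = 8    (extra length beyond the original request)
 *	win  = min(2, 8)   = 2
 *	offs = 10 % 12     = 10   (not < win, so it does not shrink win)
 *	pa_lstart = 10 - 2 = 8    (the pa still covers logical block 10)
 */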
3485
3486/*
3487 * creates new preallocated space for the locality group this inode belongs to
3488 */
Eric Sandeen4ddfef72008-04-29 08:11:12 -04003489static noinline_for_stack int
3490ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
Alex Tomasc9de5602008-01-29 00:19:52 -05003491{
3492 struct super_block *sb = ac->ac_sb;
3493 struct ext4_locality_group *lg;
3494 struct ext4_prealloc_space *pa;
3495 struct ext4_group_info *grp;
3496
3497	/* preallocate only when the found space is larger than requested */
3498 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3499 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3500 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3501
3502 BUG_ON(ext4_pspace_cachep == NULL);
3503 pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3504 if (pa == NULL)
3505 return -ENOMEM;
3506
3507 /* preallocation can change ac_b_ex, thus we store actually
3508 * allocated blocks for history */
3509 ac->ac_f_ex = ac->ac_b_ex;
3510
3511 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3512 pa->pa_lstart = pa->pa_pstart;
3513 pa->pa_len = ac->ac_b_ex.fe_len;
3514 pa->pa_free = pa->pa_len;
3515 atomic_set(&pa->pa_count, 1);
3516 spin_lock_init(&pa->pa_lock);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04003517 INIT_LIST_HEAD(&pa->pa_inode_list);
Aneesh Kumar K.Vd794bf82009-02-14 10:31:16 -05003518 INIT_LIST_HEAD(&pa->pa_group_list);
Alex Tomasc9de5602008-01-29 00:19:52 -05003519 pa->pa_deleted = 0;
Aneesh Kumar K.Vcc0fb9a2009-03-27 17:16:58 -04003520 pa->pa_type = MB_GROUP_PA;
Alex Tomasc9de5602008-01-29 00:19:52 -05003521
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04003522 mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa,
Theodore Ts'o9bffad12009-06-17 11:48:11 -04003523 pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3524 trace_ext4_mb_new_group_pa(ac, pa);
Alex Tomasc9de5602008-01-29 00:19:52 -05003525
3526 ext4_mb_use_group_pa(ac, pa);
3527 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3528
3529 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3530 lg = ac->ac_lg;
3531 BUG_ON(lg == NULL);
3532
3533 pa->pa_obj_lock = &lg->lg_prealloc_lock;
3534 pa->pa_inode = NULL;
3535
3536 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3537 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3538 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3539
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04003540 /*
3541 * We will later add the new pa to the right bucket
3542 * after updating the pa_free in ext4_mb_release_context
3543 */
Alex Tomasc9de5602008-01-29 00:19:52 -05003544 return 0;
3545}
3546
3547static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
3548{
3549 int err;
3550
3551 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
3552 err = ext4_mb_new_group_pa(ac);
3553 else
3554 err = ext4_mb_new_inode_pa(ac);
3555 return err;
3556}
3557
3558/*
3559 * finds all unused blocks in the on-disk bitmap, frees them in
3560 * the in-core bitmap and buddy.
3561 * @pa must be unlinked from inode and group lists, so that
3562 * nobody else can find/use it.
3563 * the caller MUST hold group/inode locks.
3564 * TODO: optimize the case when there are no in-core structures yet
3565 */
Eric Sandeen4ddfef72008-04-29 08:11:12 -04003566static noinline_for_stack int
3567ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
Eric Sandeen3e1e5f52010-10-27 21:30:07 -04003568 struct ext4_prealloc_space *pa)
Alex Tomasc9de5602008-01-29 00:19:52 -05003569{
Alex Tomasc9de5602008-01-29 00:19:52 -05003570 struct super_block *sb = e4b->bd_sb;
3571 struct ext4_sb_info *sbi = EXT4_SB(sb);
Theodore Ts'o498e5f22008-11-05 00:14:04 -05003572 unsigned int end;
3573 unsigned int next;
Alex Tomasc9de5602008-01-29 00:19:52 -05003574 ext4_group_t group;
3575 ext4_grpblk_t bit;
Theodore Ts'oba80b102009-01-03 20:03:21 -05003576 unsigned long long grp_blk_start;
Alex Tomasc9de5602008-01-29 00:19:52 -05003577 int err = 0;
3578 int free = 0;
3579
3580 BUG_ON(pa->pa_deleted == 0);
3581 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003582 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
Alex Tomasc9de5602008-01-29 00:19:52 -05003583 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3584 end = bit + pa->pa_len;
3585
Alex Tomasc9de5602008-01-29 00:19:52 -05003586 while (bit < end) {
Aneesh Kumar K.Vffad0a42008-02-23 01:38:34 -05003587 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
Alex Tomasc9de5602008-01-29 00:19:52 -05003588 if (bit >= end)
3589 break;
Aneesh Kumar K.Vffad0a42008-02-23 01:38:34 -05003590 next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04003591 mb_debug(1, " free preallocated %u/%u in group %u\n",
Andi Kleen5a0790c2010-06-14 13:28:03 -04003592 (unsigned) ext4_group_first_block_no(sb, group) + bit,
3593 (unsigned) next - bit, (unsigned) group);
Alex Tomasc9de5602008-01-29 00:19:52 -05003594 free += next - bit;
3595
Eric Sandeen3e1e5f52010-10-27 21:30:07 -04003596 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003597 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
3598 EXT4_C2B(sbi, bit)),
Lukas Czernera9c667f2011-06-06 09:51:52 -04003599 next - bit);
Alex Tomasc9de5602008-01-29 00:19:52 -05003600 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
3601 bit = next + 1;
3602 }
3603 if (free != pa->pa_free) {
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04003604 ext4_msg(e4b->bd_sb, KERN_CRIT,
3605 "pa %p: logic %lu, phys. %lu, len %lu",
3606 pa, (unsigned long) pa->pa_lstart,
3607 (unsigned long) pa->pa_pstart,
3608 (unsigned long) pa->pa_len);
Theodore Ts'oe29136f2010-06-29 12:54:28 -04003609 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
Aneesh Kumar K.V5d1b1b32009-01-05 22:19:52 -05003610 free, pa->pa_free);
Aneesh Kumar K.Ve56eb652008-02-15 13:48:21 -05003611 /*
3612 * pa is already deleted so we use the value obtained
3613 * from the bitmap and continue.
3614 */
Alex Tomasc9de5602008-01-29 00:19:52 -05003615 }
Alex Tomasc9de5602008-01-29 00:19:52 -05003616 atomic_add(free, &sbi->s_mb_discarded);
3617
3618 return err;
3619}
3620
Eric Sandeen4ddfef72008-04-29 08:11:12 -04003621static noinline_for_stack int
3622ext4_mb_release_group_pa(struct ext4_buddy *e4b,
Eric Sandeen3e1e5f52010-10-27 21:30:07 -04003623 struct ext4_prealloc_space *pa)
Alex Tomasc9de5602008-01-29 00:19:52 -05003624{
Alex Tomasc9de5602008-01-29 00:19:52 -05003625 struct super_block *sb = e4b->bd_sb;
3626 ext4_group_t group;
3627 ext4_grpblk_t bit;
3628
Yongqiang Yang60e07cf2011-12-18 15:49:54 -05003629 trace_ext4_mb_release_group_pa(sb, pa);
Alex Tomasc9de5602008-01-29 00:19:52 -05003630 BUG_ON(pa->pa_deleted == 0);
3631 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3632 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3633 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
3634 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
Eric Sandeen3e1e5f52010-10-27 21:30:07 -04003635 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
Alex Tomasc9de5602008-01-29 00:19:52 -05003636
3637 return 0;
3638}
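/*
 * Note the asymmetry with ext4_mb_release_inode_pa() above: a group pa
 * is released wholesale, trusting pa_len, while an inode pa walks the
 * on-disk bitmap and frees only the ranges that are actually still
 * unused there.
 */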
3639
3640/*
3641 * releases all preallocations in given group
3642 *
3643 * first, we need to decide discard policy:
3644 * - when do we discard
3645 * 1) ENOSPC
3646 * - how many do we discard
3647 * 1) how many requested
3648 */
Eric Sandeen4ddfef72008-04-29 08:11:12 -04003649static noinline_for_stack int
3650ext4_mb_discard_group_preallocations(struct super_block *sb,
Alex Tomasc9de5602008-01-29 00:19:52 -05003651 ext4_group_t group, int needed)
3652{
3653 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3654 struct buffer_head *bitmap_bh = NULL;
3655 struct ext4_prealloc_space *pa, *tmp;
3656 struct list_head list;
3657 struct ext4_buddy e4b;
3658 int err;
3659 int busy = 0;
3660 int free = 0;
3661
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04003662 mb_debug(1, "discard preallocation for group %u\n", group);
Alex Tomasc9de5602008-01-29 00:19:52 -05003663
3664 if (list_empty(&grp->bb_prealloc_list))
3665 return 0;
3666
Theodore Ts'o574ca172008-07-11 19:27:31 -04003667 bitmap_bh = ext4_read_block_bitmap(sb, group);
Alex Tomasc9de5602008-01-29 00:19:52 -05003668 if (bitmap_bh == NULL) {
Eric Sandeen12062dd2010-02-15 14:19:27 -05003669 ext4_error(sb, "Error reading block bitmap for %u", group);
Aneesh Kumar K.Vce89f462008-07-23 14:09:29 -04003670 return 0;
Alex Tomasc9de5602008-01-29 00:19:52 -05003671 }
3672
3673 err = ext4_mb_load_buddy(sb, group, &e4b);
Aneesh Kumar K.Vce89f462008-07-23 14:09:29 -04003674 if (err) {
Eric Sandeen12062dd2010-02-15 14:19:27 -05003675 ext4_error(sb, "Error loading buddy information for %u", group);
Aneesh Kumar K.Vce89f462008-07-23 14:09:29 -04003676 put_bh(bitmap_bh);
3677 return 0;
3678 }
Alex Tomasc9de5602008-01-29 00:19:52 -05003679
3680 if (needed == 0)
Theodore Ts'o7137d7a2011-09-09 18:38:51 -04003681 needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
Alex Tomasc9de5602008-01-29 00:19:52 -05003682
Alex Tomasc9de5602008-01-29 00:19:52 -05003683 INIT_LIST_HEAD(&list);
Alex Tomasc9de5602008-01-29 00:19:52 -05003684repeat:
3685 ext4_lock_group(sb, group);
3686 list_for_each_entry_safe(pa, tmp,
3687 &grp->bb_prealloc_list, pa_group_list) {
3688 spin_lock(&pa->pa_lock);
3689 if (atomic_read(&pa->pa_count)) {
3690 spin_unlock(&pa->pa_lock);
3691 busy = 1;
3692 continue;
3693 }
3694 if (pa->pa_deleted) {
3695 spin_unlock(&pa->pa_lock);
3696 continue;
3697 }
3698
3699 /* seems this one can be freed ... */
3700 pa->pa_deleted = 1;
3701
3702 /* we can trust pa_free ... */
3703 free += pa->pa_free;
3704
3705 spin_unlock(&pa->pa_lock);
3706
3707 list_del(&pa->pa_group_list);
3708 list_add(&pa->u.pa_tmp_list, &list);
3709 }
3710
3711 /* if we still need more blocks and some PAs were used, try again */
3712 if (free < needed && busy) {
3713 busy = 0;
3714 ext4_unlock_group(sb, group);
3715 /*
3716		 * Yield the CPU here so that we don't get a soft lockup
3717		 * in the non-preempt case.
3718 */
3719 yield();
3720 goto repeat;
3721 }
3722
3723 /* found anything to free? */
3724 if (list_empty(&list)) {
3725 BUG_ON(free != 0);
3726 goto out;
3727 }
3728
3729 /* now free all selected PAs */
3730 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3731
3732 /* remove from object (inode or locality group) */
3733 spin_lock(pa->pa_obj_lock);
3734 list_del_rcu(&pa->pa_inode_list);
3735 spin_unlock(pa->pa_obj_lock);
3736
Aneesh Kumar K.Vcc0fb9a2009-03-27 17:16:58 -04003737 if (pa->pa_type == MB_GROUP_PA)
Eric Sandeen3e1e5f52010-10-27 21:30:07 -04003738 ext4_mb_release_group_pa(&e4b, pa);
Alex Tomasc9de5602008-01-29 00:19:52 -05003739 else
Eric Sandeen3e1e5f52010-10-27 21:30:07 -04003740 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
Alex Tomasc9de5602008-01-29 00:19:52 -05003741
3742 list_del(&pa->u.pa_tmp_list);
3743 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3744 }
3745
3746out:
3747 ext4_unlock_group(sb, group);
Jing Zhange39e07f2010-05-14 00:00:00 -04003748 ext4_mb_unload_buddy(&e4b);
Alex Tomasc9de5602008-01-29 00:19:52 -05003749 put_bh(bitmap_bh);
3750 return free;
3751}
3752
3753/*
3754 * releases all unused preallocated blocks for the given inode
3755 *
3756 * It's important to discard preallocations under i_data_sem
3757 * We don't want another block to be served from the prealloc
3758 * space when we are discarding the inode prealloc space.
3759 *
3760 * FIXME!! Make sure it is valid at all the call sites
3761 */
Theodore Ts'oc2ea3fd2008-10-10 09:40:52 -04003762void ext4_discard_preallocations(struct inode *inode)
Alex Tomasc9de5602008-01-29 00:19:52 -05003763{
3764 struct ext4_inode_info *ei = EXT4_I(inode);
3765 struct super_block *sb = inode->i_sb;
3766 struct buffer_head *bitmap_bh = NULL;
3767 struct ext4_prealloc_space *pa, *tmp;
3768 ext4_group_t group = 0;
3769 struct list_head list;
3770 struct ext4_buddy e4b;
3771 int err;
3772
Theodore Ts'oc2ea3fd2008-10-10 09:40:52 -04003773 if (!S_ISREG(inode->i_mode)) {
Alex Tomasc9de5602008-01-29 00:19:52 -05003774 /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
3775 return;
3776 }
3777
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04003778 mb_debug(1, "discard preallocation for inode %lu\n", inode->i_ino);
Theodore Ts'o9bffad12009-06-17 11:48:11 -04003779 trace_ext4_discard_preallocations(inode);
Alex Tomasc9de5602008-01-29 00:19:52 -05003780
3781 INIT_LIST_HEAD(&list);
3782
3783repeat:
3784 /* first, collect all pa's in the inode */
3785 spin_lock(&ei->i_prealloc_lock);
3786 while (!list_empty(&ei->i_prealloc_list)) {
3787 pa = list_entry(ei->i_prealloc_list.next,
3788 struct ext4_prealloc_space, pa_inode_list);
3789 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
3790 spin_lock(&pa->pa_lock);
3791 if (atomic_read(&pa->pa_count)) {
3792 /* this shouldn't happen often - nobody should
3793 * use preallocation while we're discarding it */
3794 spin_unlock(&pa->pa_lock);
3795 spin_unlock(&ei->i_prealloc_lock);
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04003796 ext4_msg(sb, KERN_ERR,
3797 "uh-oh! used pa while discarding");
Alex Tomasc9de5602008-01-29 00:19:52 -05003798 WARN_ON(1);
3799 schedule_timeout_uninterruptible(HZ);
3800 goto repeat;
3801
3802 }
3803 if (pa->pa_deleted == 0) {
3804 pa->pa_deleted = 1;
3805 spin_unlock(&pa->pa_lock);
3806 list_del_rcu(&pa->pa_inode_list);
3807 list_add(&pa->u.pa_tmp_list, &list);
3808 continue;
3809 }
3810
3811 /* someone is deleting pa right now */
3812 spin_unlock(&pa->pa_lock);
3813 spin_unlock(&ei->i_prealloc_lock);
3814
3815		/* we have to wait here because pa_deleted
3816		 * doesn't mean the pa is already unlinked from
3817		 * the list. since we might be called from
3818		 * ->clear_inode(), the inode will get freed,
3819		 * and a concurrent thread that is unlinking the
3820		 * pa from the inode's list may access already
3821		 * freed memory -- bad, bad, bad */
3822
3823 /* XXX: if this happens too often, we can
3824 * add a flag to force wait only in case
3825 * of ->clear_inode(), but not in case of
3826 * regular truncate */
3827 schedule_timeout_uninterruptible(HZ);
3828 goto repeat;
3829 }
3830 spin_unlock(&ei->i_prealloc_lock);
3831
3832 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
Aneesh Kumar K.Vcc0fb9a2009-03-27 17:16:58 -04003833 BUG_ON(pa->pa_type != MB_INODE_PA);
Alex Tomasc9de5602008-01-29 00:19:52 -05003834 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
3835
3836 err = ext4_mb_load_buddy(sb, group, &e4b);
Aneesh Kumar K.Vce89f462008-07-23 14:09:29 -04003837 if (err) {
Eric Sandeen12062dd2010-02-15 14:19:27 -05003838 ext4_error(sb, "Error loading buddy information for %u",
3839 group);
Aneesh Kumar K.Vce89f462008-07-23 14:09:29 -04003840 continue;
3841 }
Alex Tomasc9de5602008-01-29 00:19:52 -05003842
Theodore Ts'o574ca172008-07-11 19:27:31 -04003843 bitmap_bh = ext4_read_block_bitmap(sb, group);
Alex Tomasc9de5602008-01-29 00:19:52 -05003844 if (bitmap_bh == NULL) {
Eric Sandeen12062dd2010-02-15 14:19:27 -05003845 ext4_error(sb, "Error reading block bitmap for %u",
3846 group);
Jing Zhange39e07f2010-05-14 00:00:00 -04003847 ext4_mb_unload_buddy(&e4b);
Aneesh Kumar K.Vce89f462008-07-23 14:09:29 -04003848 continue;
Alex Tomasc9de5602008-01-29 00:19:52 -05003849 }
3850
3851 ext4_lock_group(sb, group);
3852 list_del(&pa->pa_group_list);
Eric Sandeen3e1e5f52010-10-27 21:30:07 -04003853 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
Alex Tomasc9de5602008-01-29 00:19:52 -05003854 ext4_unlock_group(sb, group);
3855
Jing Zhange39e07f2010-05-14 00:00:00 -04003856 ext4_mb_unload_buddy(&e4b);
Alex Tomasc9de5602008-01-29 00:19:52 -05003857 put_bh(bitmap_bh);
3858
3859 list_del(&pa->u.pa_tmp_list);
3860 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3861 }
3862}
3863
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04003864#ifdef CONFIG_EXT4_DEBUG
Alex Tomasc9de5602008-01-29 00:19:52 -05003865static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
3866{
3867 struct super_block *sb = ac->ac_sb;
Theodore Ts'o8df96752009-05-01 08:50:38 -04003868 ext4_group_t ngroups, i;
Alex Tomasc9de5602008-01-29 00:19:52 -05003869
Theodore Ts'o4dd89fc2011-02-27 17:23:47 -05003870 if (!mb_enable_debug ||
3871 (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED))
Eric Sandeene3570632010-07-27 11:56:08 -04003872 return;
3873
Joe Perches7f6a11e2012-03-19 23:09:43 -04003874 ext4_msg(ac->ac_sb, KERN_ERR, "Can't allocate:"
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04003875 " Allocation context details:");
Joe Perches7f6a11e2012-03-19 23:09:43 -04003876 ext4_msg(ac->ac_sb, KERN_ERR, "status %d flags %d",
Alex Tomasc9de5602008-01-29 00:19:52 -05003877 ac->ac_status, ac->ac_flags);
Joe Perches7f6a11e2012-03-19 23:09:43 -04003878 ext4_msg(ac->ac_sb, KERN_ERR, "orig %lu/%lu/%lu@%lu, "
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04003879 "goal %lu/%lu/%lu@%lu, "
3880 "best %lu/%lu/%lu@%lu cr %d",
Alex Tomasc9de5602008-01-29 00:19:52 -05003881 (unsigned long)ac->ac_o_ex.fe_group,
3882 (unsigned long)ac->ac_o_ex.fe_start,
3883 (unsigned long)ac->ac_o_ex.fe_len,
3884 (unsigned long)ac->ac_o_ex.fe_logical,
3885 (unsigned long)ac->ac_g_ex.fe_group,
3886 (unsigned long)ac->ac_g_ex.fe_start,
3887 (unsigned long)ac->ac_g_ex.fe_len,
3888 (unsigned long)ac->ac_g_ex.fe_logical,
3889 (unsigned long)ac->ac_b_ex.fe_group,
3890 (unsigned long)ac->ac_b_ex.fe_start,
3891 (unsigned long)ac->ac_b_ex.fe_len,
3892 (unsigned long)ac->ac_b_ex.fe_logical,
3893 (int)ac->ac_criteria);
Joe Perches7f6a11e2012-03-19 23:09:43 -04003894 ext4_msg(ac->ac_sb, KERN_ERR, "%lu scanned, %d found",
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04003895 ac->ac_ex_scanned, ac->ac_found);
Joe Perches7f6a11e2012-03-19 23:09:43 -04003896 ext4_msg(ac->ac_sb, KERN_ERR, "groups: ");
Theodore Ts'o8df96752009-05-01 08:50:38 -04003897 ngroups = ext4_get_groups_count(sb);
3898 for (i = 0; i < ngroups; i++) {
Alex Tomasc9de5602008-01-29 00:19:52 -05003899 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
3900 struct ext4_prealloc_space *pa;
3901 ext4_grpblk_t start;
3902 struct list_head *cur;
3903 ext4_lock_group(sb, i);
3904 list_for_each(cur, &grp->bb_prealloc_list) {
3905 pa = list_entry(cur, struct ext4_prealloc_space,
3906 pa_group_list);
3907 spin_lock(&pa->pa_lock);
3908 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3909 NULL, &start);
3910 spin_unlock(&pa->pa_lock);
Akira Fujita1c718502009-07-05 23:04:36 -04003911 printk(KERN_ERR "PA:%u:%d:%u \n", i,
3912 start, pa->pa_len);
Alex Tomasc9de5602008-01-29 00:19:52 -05003913 }
Solofo Ramangalahy60bd63d2008-04-29 21:59:59 -04003914 ext4_unlock_group(sb, i);
Alex Tomasc9de5602008-01-29 00:19:52 -05003915
3916 if (grp->bb_free == 0)
3917 continue;
Akira Fujita1c718502009-07-05 23:04:36 -04003918 printk(KERN_ERR "%u: %d/%d \n",
Alex Tomasc9de5602008-01-29 00:19:52 -05003919 i, grp->bb_free, grp->bb_fragments);
3920 }
3921 printk(KERN_ERR "\n");
3922}
3923#else
3924static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
3925{
3926 return;
3927}
3928#endif
3929
3930/*
3931 * We use locality group preallocation for small files. The size of the
3932 * file is taken to be the current size or the resulting size after
3933 * allocation, whichever is larger.
3934 *
Theodore Ts'ob713a5e2009-03-31 09:11:14 -04003935 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
Alex Tomasc9de5602008-01-29 00:19:52 -05003936 */
3937static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
3938{
3939 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3940 int bsbits = ac->ac_sb->s_blocksize_bits;
3941 loff_t size, isize;
3942
3943 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3944 return;
3945
Theodore Ts'o4ba74d02009-08-09 22:01:13 -04003946 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
3947 return;
3948
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003949 size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
Theodore Ts'o50797482009-09-18 13:34:02 -04003950 isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
3951 >> bsbits;
Alex Tomasc9de5602008-01-29 00:19:52 -05003952
Theodore Ts'o50797482009-09-18 13:34:02 -04003953 if ((size == isize) &&
3954 !ext4_fs_is_busy(sbi) &&
3955 (atomic_read(&ac->ac_inode->i_writecount) == 0)) {
3956 ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
3957 return;
3958 }
3959
Robin Dongebbe0272011-10-26 05:14:27 -04003960 if (sbi->s_mb_group_prealloc <= 0) {
3961 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
3962 return;
3963 }
3964
Alex Tomasc9de5602008-01-29 00:19:52 -05003965 /* don't use group allocation for large files */
Theodore Ts'o71780572009-09-28 00:06:20 -04003966 size = max(size, isize);
Tao Macc483f12010-03-01 19:06:35 -05003967 if (size > sbi->s_mb_stream_request) {
Theodore Ts'o4ba74d02009-08-09 22:01:13 -04003968 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
Alex Tomasc9de5602008-01-29 00:19:52 -05003969 return;
Theodore Ts'o4ba74d02009-08-09 22:01:13 -04003970 }
Alex Tomasc9de5602008-01-29 00:19:52 -05003971
3972 BUG_ON(ac->ac_lg != NULL);
3973 /*
3974 * locality group prealloc space are per cpu. The reason for having
3975 * per cpu locality group is to reduce the contention between block
3976 * request from multiple CPUs.
3977 */
Christoph Lameterca0c9582009-10-03 19:48:22 +09003978 ac->ac_lg = __this_cpu_ptr(sbi->s_locality_groups);
Alex Tomasc9de5602008-01-29 00:19:52 -05003979
3980 /* we're going to use group allocation */
3981 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
3982
3983 /* serialize all allocations in the group */
3984 mutex_lock(&ac->ac_lg->lg_mutex);
3985}
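/*
 * Decision sketch for the policy above (assuming a hypothetical
 * s_mb_stream_request of 16 blocks): a file whose max(current,
 * resulting) size is 8 blocks keeps group allocation and is served from
 * the per-cpu locality group, while one at 64 blocks gets
 * EXT4_MB_STREAM_ALLOC and uses per-inode preallocation instead.
 */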
3986
Eric Sandeen4ddfef72008-04-29 08:11:12 -04003987static noinline_for_stack int
3988ext4_mb_initialize_context(struct ext4_allocation_context *ac,
Alex Tomasc9de5602008-01-29 00:19:52 -05003989 struct ext4_allocation_request *ar)
3990{
3991 struct super_block *sb = ar->inode->i_sb;
3992 struct ext4_sb_info *sbi = EXT4_SB(sb);
3993 struct ext4_super_block *es = sbi->s_es;
3994 ext4_group_t group;
Theodore Ts'o498e5f22008-11-05 00:14:04 -05003995 unsigned int len;
3996 ext4_fsblk_t goal;
Alex Tomasc9de5602008-01-29 00:19:52 -05003997 ext4_grpblk_t block;
3998
3999 /* we can't allocate > group size */
4000 len = ar->len;
4001
4002 /* just a dirty hack to filter too big requests */
Theodore Ts'o7137d7a2011-09-09 18:38:51 -04004003 if (len >= EXT4_CLUSTERS_PER_GROUP(sb) - 10)
4004 len = EXT4_CLUSTERS_PER_GROUP(sb) - 10;
Alex Tomasc9de5602008-01-29 00:19:52 -05004005
4006 /* start searching from the goal */
4007 goal = ar->goal;
4008 if (goal < le32_to_cpu(es->s_first_data_block) ||
4009 goal >= ext4_blocks_count(es))
4010 goal = le32_to_cpu(es->s_first_data_block);
4011 ext4_get_group_no_and_offset(sb, goal, &group, &block);
4012
4013 /* set up allocation goals */
Theodore Ts'o833576b2009-07-13 09:45:52 -04004014 memset(ac, 0, sizeof(struct ext4_allocation_context));
Theodore Ts'o53accfa2011-09-09 18:48:51 -04004015 ac->ac_b_ex.fe_logical = ar->logical & ~(sbi->s_cluster_ratio - 1);
Alex Tomasc9de5602008-01-29 00:19:52 -05004016 ac->ac_status = AC_STATUS_CONTINUE;
Alex Tomasc9de5602008-01-29 00:19:52 -05004017 ac->ac_sb = sb;
4018 ac->ac_inode = ar->inode;
Theodore Ts'o53accfa2011-09-09 18:48:51 -04004019 ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
Alex Tomasc9de5602008-01-29 00:19:52 -05004020 ac->ac_o_ex.fe_group = group;
4021 ac->ac_o_ex.fe_start = block;
4022 ac->ac_o_ex.fe_len = len;
Theodore Ts'o53accfa2011-09-09 18:48:51 -04004023 ac->ac_g_ex = ac->ac_o_ex;
Alex Tomasc9de5602008-01-29 00:19:52 -05004024 ac->ac_flags = ar->flags;
Alex Tomasc9de5602008-01-29 00:19:52 -05004025
4026	/* we have to define the context: will we work with a file or
4027	 * a locality group? this is a policy, actually */
4028 ext4_mb_group_or_file(ac);
4029
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04004030 mb_debug(1, "init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, "
Alex Tomasc9de5602008-01-29 00:19:52 -05004031 "left: %u/%u, right %u/%u to %swritable\n",
4032 (unsigned) ar->len, (unsigned) ar->logical,
4033 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4034 (unsigned) ar->lleft, (unsigned) ar->pleft,
4035 (unsigned) ar->lright, (unsigned) ar->pright,
4036 atomic_read(&ar->inode->i_writecount) ? "" : "non-");
4037 return 0;
4038
4039}
4040
static noinline_for_stack void
ext4_mb_discard_lg_preallocations(struct super_block *sb,
				  struct ext4_locality_group *lg,
				  int order, int total_entries)
{
	ext4_group_t group = 0;
	struct ext4_buddy e4b;
	struct list_head discard_list;
	struct ext4_prealloc_space *pa, *tmp;

	mb_debug(1, "discard locality group preallocation\n");

	INIT_LIST_HEAD(&discard_list);

	spin_lock(&lg->lg_prealloc_lock);
	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
				pa_inode_list) {
		spin_lock(&pa->pa_lock);
		if (atomic_read(&pa->pa_count)) {
			/*
			 * This is the pa that we just used
			 * for block allocation, so don't
			 * free it.
			 */
			spin_unlock(&pa->pa_lock);
			continue;
		}
		if (pa->pa_deleted) {
			spin_unlock(&pa->pa_lock);
			continue;
		}
		/* only lg prealloc space */
		BUG_ON(pa->pa_type != MB_GROUP_PA);

		/* seems this one can be freed ... */
		pa->pa_deleted = 1;
		spin_unlock(&pa->pa_lock);

		list_del_rcu(&pa->pa_inode_list);
		list_add(&pa->u.pa_tmp_list, &discard_list);

		total_entries--;
		if (total_entries <= 5) {
			/*
			 * We want to keep only 5 entries,
			 * allowing the list to grow to 8. This
			 * makes sure we don't call discard
			 * again soon for this list.
			 */
			break;
		}
	}
	spin_unlock(&lg->lg_prealloc_lock);

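	/*
	 * Second pass: release the collected entries outside
	 * lg_prealloc_lock; ext4_mb_load_buddy() may sleep, so it
	 * cannot be called with the spinlock held.
	 */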
	list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
		if (ext4_mb_load_buddy(sb, group, &e4b)) {
			ext4_error(sb, "Error loading buddy information for %u",
				   group);
			continue;
		}
		ext4_lock_group(sb, group);
		list_del(&pa->pa_group_list);
		ext4_mb_release_group_pa(&e4b, pa);
		ext4_unlock_group(sb, group);

		ext4_mb_unload_buddy(&e4b);
		list_del(&pa->u.pa_tmp_list);
		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
	}
}

/*
 * We have incremented pa_count, so it cannot be freed at this
 * point. We also hold lg_mutex, so no parallel allocation is
 * possible from this lg. That means pa_free cannot be updated.
 *
 * A parallel ext4_mb_discard_group_preallocations is possible,
 * which can cause the lg_prealloc_list to be updated.
 */

static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
{
	int order, added = 0, lg_prealloc_count = 1;
	struct super_block *sb = ac->ac_sb;
	struct ext4_locality_group *lg = ac->ac_lg;
	struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;

	order = fls(pa->pa_free) - 1;
	if (order > PREALLOC_TB_SIZE - 1)
		/* The max size of the hash table is PREALLOC_TB_SIZE */
		order = PREALLOC_TB_SIZE - 1;
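	/*
	 * Example: fls() selects the list by the highest set bit of
	 * pa_free. For pa_free == 100 (0b1100100) fls() returns 7, so
	 * the pa goes on the order-6 list (capped at
	 * PREALLOC_TB_SIZE - 1).
	 */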
	/* Add the prealloc space to lg */
	rcu_read_lock();
	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
				pa_inode_list) {
		spin_lock(&tmp_pa->pa_lock);
		if (tmp_pa->pa_deleted) {
			spin_unlock(&tmp_pa->pa_lock);
			continue;
		}
		if (!added && pa->pa_free < tmp_pa->pa_free) {
			/* Add to the tail of the previous entry */
			list_add_tail_rcu(&pa->pa_inode_list,
					  &tmp_pa->pa_inode_list);
			added = 1;
			/*
			 * we want to count the total
			 * number of entries in the list
			 */
		}
		spin_unlock(&tmp_pa->pa_lock);
		lg_prealloc_count++;
	}
	if (!added)
		list_add_tail_rcu(&pa->pa_inode_list,
				  &lg->lg_prealloc_list[order]);
	rcu_read_unlock();

	/* Now trim the list to be not more than 8 elements */
	if (lg_prealloc_count > 8)
		ext4_mb_discard_lg_preallocations(sb, lg,
						  order, lg_prealloc_count);
}

/*
 * release all resources used in allocation
 */
static int ext4_mb_release_context(struct ext4_allocation_context *ac)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_prealloc_space *pa = ac->ac_pa;
	if (pa) {
		if (pa->pa_type == MB_GROUP_PA) {
			/* see comment in ext4_mb_use_group_pa() */
			spin_lock(&pa->pa_lock);
			pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
			pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
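			/*
			 * fe_len is in clusters while the pa window is
			 * in blocks, hence the EXT4_C2B() conversion:
			 * with a cluster ratio of 16, fe_len == 2
			 * advances the window by 32 blocks.
			 */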
			pa->pa_free -= ac->ac_b_ex.fe_len;
			pa->pa_len -= ac->ac_b_ex.fe_len;
			spin_unlock(&pa->pa_lock);
		}
	}
	if (pa) {
		/*
		 * We want to add the pa to the right bucket.
		 * Remove it from the list and while adding
		 * make sure the list to which we are adding
		 * doesn't grow big.
		 */
		if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) {
			spin_lock(pa->pa_obj_lock);
			list_del_rcu(&pa->pa_inode_list);
			spin_unlock(pa->pa_obj_lock);
			ext4_mb_add_n_trim(ac);
		}
		ext4_mb_put_pa(ac, ac->ac_sb, pa);
	}
	if (ac->ac_bitmap_page)
		page_cache_release(ac->ac_bitmap_page);
	if (ac->ac_buddy_page)
		page_cache_release(ac->ac_buddy_page);
	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
		mutex_unlock(&ac->ac_lg->lg_mutex);
	ext4_mb_collect_stats(ac);
	return 0;
}

static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
{
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
	int ret;
	int freed = 0;

	trace_ext4_mb_discard_preallocations(sb, needed);
	for (i = 0; i < ngroups && needed > 0; i++) {
		ret = ext4_mb_discard_group_preallocations(sb, i, needed);
		freed += ret;
		needed -= ret;
	}

	return freed;
}

/*
 * Main entry point into mballoc to allocate blocks.
 * It tries to use preallocation first, then falls back
 * to the usual allocation path.
 */
ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
				struct ext4_allocation_request *ar, int *errp)
{
	int freed;
	struct ext4_allocation_context *ac = NULL;
	struct ext4_sb_info *sbi;
	struct super_block *sb;
	ext4_fsblk_t block = 0;
	unsigned int inquota = 0;
	unsigned int reserv_clstrs = 0;

	sb = ar->inode->i_sb;
	sbi = EXT4_SB(sb);

	trace_ext4_request_blocks(ar);

	/* Allow the quota file to use the superuser reservation */
	if (IS_NOQUOTA(ar->inode))
		ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;

	/*
	 * For delayed allocation, we could skip the ENOSPC and
	 * EDQUOT check, as blocks and quotas have already been
	 * reserved when the data was copied into the pagecache.
	 */
	if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED))
		ar->flags |= EXT4_MB_DELALLOC_RESERVED;
	else {
		/* Without delayed allocation we need to verify
		 * there are enough free blocks to do block allocation
		 * and verify that the allocation doesn't exceed the
		 * quota limits.
		 */
		while (ar->len &&
		       ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {

			/* let others free the space */
			yield();
			ar->len = ar->len >> 1;
		}
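		/*
		 * Note the backoff above: every failed claim halves the
		 * request (e.g. 256 -> 128 -> 64 clusters) until a claim
		 * succeeds or ar->len reaches zero, which becomes
		 * ENOSPC below.
		 */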
		if (!ar->len) {
			*errp = -ENOSPC;
			return 0;
		}
		reserv_clstrs = ar->len;
		if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
			dquot_alloc_block_nofail(ar->inode,
						 EXT4_C2B(sbi, ar->len));
		} else {
			while (ar->len &&
			       dquot_alloc_block(ar->inode,
						 EXT4_C2B(sbi, ar->len))) {

				ar->flags |= EXT4_MB_HINT_NOPREALLOC;
				ar->len--;
			}
		}
		inquota = ar->len;
		if (ar->len == 0) {
			*errp = -EDQUOT;
			goto out;
		}
	}

	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
	if (!ac) {
		ar->len = 0;
		*errp = -ENOMEM;
		goto out;
	}

	*errp = ext4_mb_initialize_context(ac, ar);
	if (*errp) {
		ar->len = 0;
		goto out;
	}

	ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
	if (!ext4_mb_use_preallocated(ac)) {
		ac->ac_op = EXT4_MB_HISTORY_ALLOC;
		ext4_mb_normalize_request(ac, ar);
repeat:
		/* allocate space in core */
		*errp = ext4_mb_regular_allocator(ac);
		if (*errp)
			goto errout;

		/* as we've just preallocated more space than the user
		 * originally requested, we store the allocated space
		 * in a special descriptor */
		if (ac->ac_status == AC_STATUS_FOUND &&
		    ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
			ext4_mb_new_preallocation(ac);
	}
	if (likely(ac->ac_status == AC_STATUS_FOUND)) {
		*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
		if (*errp == -EAGAIN) {
			/*
			 * drop the reference that we took
			 * in ext4_mb_use_best_found
			 */
			ext4_mb_release_context(ac);
			ac->ac_b_ex.fe_group = 0;
			ac->ac_b_ex.fe_start = 0;
			ac->ac_b_ex.fe_len = 0;
			ac->ac_status = AC_STATUS_CONTINUE;
			goto repeat;
		} else if (*errp)
		errout:
			ext4_discard_allocated_blocks(ac);
		else {
			block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
			ar->len = ac->ac_b_ex.fe_len;
		}
	} else {
		freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
		if (freed)
			goto repeat;
		*errp = -ENOSPC;
	}

	if (*errp) {
		ac->ac_b_ex.fe_len = 0;
		ar->len = 0;
		ext4_mb_show_ac(ac);
	}
	ext4_mb_release_context(ac);
out:
	if (ac)
		kmem_cache_free(ext4_ac_cachep, ac);
	if (inquota && ar->len < inquota)
		dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
	if (!ar->len) {
		if (!ext4_test_inode_state(ar->inode,
					   EXT4_STATE_DELALLOC_RESERVED))
			/* release all the reserved blocks if non delalloc */
			percpu_counter_sub(&sbi->s_dirtyclusters_counter,
					   reserv_clstrs);
	}

	trace_ext4_allocate_blocks(ar, (unsigned long long)block);

	return block;
}

/*
 * We can merge two free data extents only if the physical blocks
 * are contiguous, AND the extents were freed by the same transaction,
 * AND the blocks are associated with the same group.
 */
static int can_merge(struct ext4_free_data *entry1,
		     struct ext4_free_data *entry2)
{
	if ((entry1->efd_tid == entry2->efd_tid) &&
	    (entry1->efd_group == entry2->efd_group) &&
	    ((entry1->efd_start_cluster + entry1->efd_count) ==
	     entry2->efd_start_cluster))
		return 1;
	return 0;
}

static noinline_for_stack int
ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
		      struct ext4_free_data *new_entry)
{
	ext4_group_t group = e4b->bd_group;
	ext4_grpblk_t cluster;
	struct ext4_free_data *entry;
	struct ext4_group_info *db = e4b->bd_info;
	struct super_block *sb = e4b->bd_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct rb_node **n = &db->bb_free_root.rb_node, *node;
	struct rb_node *parent = NULL, *new_node;

	BUG_ON(!ext4_handle_valid(handle));
	BUG_ON(e4b->bd_bitmap_page == NULL);
	BUG_ON(e4b->bd_buddy_page == NULL);

	new_node = &new_entry->efd_node;
	cluster = new_entry->efd_start_cluster;

	if (!*n) {
		/*
		 * First free block extent. We need to protect the
		 * buddy cache from being freed, otherwise we'll
		 * refresh it from the on-disk bitmap and lose
		 * not-yet-available blocks.
		 */
		page_cache_get(e4b->bd_buddy_page);
		page_cache_get(e4b->bd_bitmap_page);
	}
	while (*n) {
		parent = *n;
		entry = rb_entry(parent, struct ext4_free_data, efd_node);
		if (cluster < entry->efd_start_cluster)
			n = &(*n)->rb_left;
		else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
			n = &(*n)->rb_right;
		else {
			ext4_grp_locked_error(sb, group, 0,
				ext4_group_first_block_no(sb, group) +
				EXT4_C2B(sbi, cluster),
				"Block already on to-be-freed list");
			return 0;
		}
	}

	rb_link_node(new_node, parent, n);
	rb_insert_color(new_node, &db->bb_free_root);

	/* Now see whether the extent can be merged to the left and right */
	node = rb_prev(new_node);
	if (node) {
		entry = rb_entry(node, struct ext4_free_data, efd_node);
		if (can_merge(entry, new_entry)) {
			new_entry->efd_start_cluster = entry->efd_start_cluster;
			new_entry->efd_count += entry->efd_count;
			rb_erase(node, &(db->bb_free_root));
			ext4_journal_callback_del(handle, &entry->efd_jce);
			kmem_cache_free(ext4_free_data_cachep, entry);
		}
	}

	node = rb_next(new_node);
	if (node) {
		entry = rb_entry(node, struct ext4_free_data, efd_node);
		if (can_merge(new_entry, entry)) {
			new_entry->efd_count += entry->efd_count;
			rb_erase(node, &(db->bb_free_root));
			ext4_journal_callback_del(handle, &entry->efd_jce);
			kmem_cache_free(ext4_free_data_cachep, entry);
		}
	}
	/* Add the extent to the transaction's private list */
	ext4_journal_callback_add(handle, ext4_free_data_callback,
				  &new_entry->efd_jce);
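	/*
	 * The callback registered above runs after the transaction
	 * commits; only then are these clusters returned to the buddy
	 * cache and become available for reuse.
	 */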
	return 0;
}

/**
 * ext4_free_blocks() -- Free given blocks and update quota
 * @handle: handle for this transaction
 * @inode: inode
 * @bh: optional buffer head of the block being freed
 * @block: start physical block to free
 * @count: number of blocks to free
 * @flags: flags used by ext4_free_blocks
 */
void ext4_free_blocks(handle_t *handle, struct inode *inode,
		      struct buffer_head *bh, ext4_fsblk_t block,
		      unsigned long count, int flags)
{
	struct buffer_head *bitmap_bh = NULL;
	struct super_block *sb = inode->i_sb;
	struct ext4_group_desc *gdp;
	unsigned long freed = 0;
	unsigned int overflow;
	ext4_grpblk_t bit;
	struct buffer_head *gd_bh;
	ext4_group_t block_group;
	struct ext4_sb_info *sbi;
	struct ext4_buddy e4b;
	unsigned int count_clusters;
	int err = 0;
	int ret;

	if (bh) {
		if (block)
			BUG_ON(block != bh->b_blocknr);
		else
			block = bh->b_blocknr;
	}

	sbi = EXT4_SB(sb);
	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
	    !ext4_data_block_valid(sbi, block, count)) {
		ext4_error(sb, "Freeing blocks not in datazone - "
			   "block = %llu, count = %lu", block, count);
		goto error_return;
	}

	ext4_debug("freeing block %llu\n", block);
	trace_ext4_free_blocks(inode, block, count, flags);

	if (flags & EXT4_FREE_BLOCKS_FORGET) {
		struct buffer_head *tbh = bh;
		int i;

		BUG_ON(bh && (count > 1));

		for (i = 0; i < count; i++) {
			if (!bh)
				tbh = sb_find_get_block(inode->i_sb,
							block + i);
			if (unlikely(!tbh))
				continue;
			ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
				    inode, tbh, block + i);
		}
	}

	/*
	 * We need to make sure we don't reuse the freed block until
	 * after the transaction is committed, which we can do by
	 * treating the block as metadata, below. We make an
	 * exception if the inode is to be written in writeback mode
	 * since writeback mode has weak data consistency guarantees.
	 */
	if (!ext4_should_writeback_data(inode))
		flags |= EXT4_FREE_BLOCKS_METADATA;

	/*
	 * If the extent to be freed does not begin on a cluster
	 * boundary, we need to deal with partial clusters at the
	 * beginning and end of the extent. Normally we will free
	 * blocks at the beginning or the end unless we are explicitly
	 * requested to avoid doing so.
	 */
	overflow = block & (sbi->s_cluster_ratio - 1);
	if (overflow) {
		if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
			overflow = sbi->s_cluster_ratio - overflow;
			block += overflow;
			if (count > overflow)
				count -= overflow;
			else
				return;
		} else {
			block -= overflow;
			count += overflow;
		}
	}
	overflow = count & (sbi->s_cluster_ratio - 1);
	if (overflow) {
		if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
			if (count > overflow)
				count -= overflow;
			else
				return;
		} else
			count += sbi->s_cluster_ratio - overflow;
	}
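	/*
	 * Worked example, assuming a cluster ratio of 16 and neither
	 * NOFREE flag: block = 70, count = 50. The head rounds down:
	 * overflow = 70 & 15 = 6, so block = 64, count = 56. The tail
	 * rounds up: 56 & 15 = 8, so count = 64. Blocks 64..127
	 * (clusters 4..7) are freed.
	 */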
do_more:
	overflow = 0;
	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);

	/*
	 * Check to see if we are freeing blocks across a group
	 * boundary.
	 */
	if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
		overflow = EXT4_C2B(sbi, bit) + count -
			EXT4_BLOCKS_PER_GROUP(sb);
		count -= overflow;
	}
	count_clusters = EXT4_B2C(sbi, count);
	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
	if (!bitmap_bh) {
		err = -EIO;
		goto error_return;
	}
	gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
	if (!gdp) {
		err = -EIO;
		goto error_return;
	}

	if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
	    in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
	    in_range(block, ext4_inode_table(sb, gdp),
		     EXT4_SB(sb)->s_itb_per_group) ||
	    in_range(block + count - 1, ext4_inode_table(sb, gdp),
		     EXT4_SB(sb)->s_itb_per_group)) {

		ext4_error(sb, "Freeing blocks in system zone - "
			   "Block = %llu, count = %lu", block, count);
		/* err = 0. ext4_std_error should be a no op */
		goto error_return;
	}

	BUFFER_TRACE(bitmap_bh, "getting write access");
	err = ext4_journal_get_write_access(handle, bitmap_bh);
	if (err)
		goto error_return;

	/*
	 * We are about to modify some metadata. Call the journal APIs
	 * to unshare ->b_data if a currently-committing transaction is
	 * using it
	 */
	BUFFER_TRACE(gd_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, gd_bh);
	if (err)
		goto error_return;
#ifdef AGGRESSIVE_CHECK
	{
		int i;
		for (i = 0; i < count_clusters; i++)
			BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
	}
#endif
	trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);

	err = ext4_mb_load_buddy(sb, block_group, &e4b);
	if (err)
		goto error_return;

	if ((flags & EXT4_FREE_BLOCKS_METADATA) && ext4_handle_valid(handle)) {
		struct ext4_free_data *new_entry;
		/*
		 * The blocks being freed are metadata; they shouldn't
		 * be reused until this transaction is committed.
		 */
		new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
		if (!new_entry) {
			ext4_mb_unload_buddy(&e4b);
			err = -ENOMEM;
			goto error_return;
		}
		new_entry->efd_start_cluster = bit;
		new_entry->efd_group = block_group;
		new_entry->efd_count = count_clusters;
		new_entry->efd_tid = handle->h_transaction->t_tid;

		ext4_lock_group(sb, block_group);
		mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
		ext4_mb_free_metadata(handle, &e4b, new_entry);
	} else {
		/*
		 * We need to update group_info->bb_free and the bitmap
		 * with the group lock held; generate_buddy looks at
		 * them with the group lock held.
		 */
		ext4_lock_group(sb, block_group);
		mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
		mb_free_blocks(inode, &e4b, bit, count_clusters);
	}
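	/*
	 * Either way the bitmap bits are cleared under the group lock;
	 * the difference is that metadata frees reach the buddy cache
	 * only after commit, while data frees hit it immediately via
	 * mb_free_blocks().
	 */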

	ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
	ext4_free_group_clusters_set(sb, gdp, ret);
	ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
				   EXT4_BLOCKS_PER_GROUP(sb) / 8);
	ext4_group_desc_csum_set(sb, block_group, gdp);
	ext4_unlock_group(sb, block_group);
	percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);

	if (sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
		atomic_add(count_clusters,
			   &sbi->s_flex_groups[flex_group].free_clusters);
	}

	ext4_mb_unload_buddy(&e4b);

	freed += count;

	if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
		dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));

	/* We dirtied the bitmap block */
	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);

	/* And the group descriptor block */
	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
	if (!err)
		err = ret;

	if (overflow && !err) {
		block += count;
		count = overflow;
		put_bh(bitmap_bh);
		goto do_more;
	}
error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, err);
	return;
}

/**
 * ext4_group_add_blocks() -- Add given blocks to an existing group
 * @handle: handle to this transaction
 * @sb: super block
 * @block: start physical block to add to the block group
 * @count: number of blocks to add
 *
 * This marks the blocks as free in the bitmap and the buddy.
 */
int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
			  ext4_fsblk_t block, unsigned long count)
{
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *gd_bh;
	ext4_group_t block_group;
	ext4_grpblk_t bit;
	unsigned int i;
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_buddy e4b;
	int err = 0, ret, blk_free_count;
	ext4_grpblk_t blocks_freed;

	ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);

	if (count == 0)
		return 0;

	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
	/*
	 * Check to see if we are adding blocks across a group
	 * boundary.
	 */
	if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
		ext4_warning(sb, "too many blocks added to group %u\n",
			     block_group);
		err = -EINVAL;
		goto error_return;
	}

	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
	if (!bitmap_bh) {
		err = -EIO;
		goto error_return;
	}

	desc = ext4_get_group_desc(sb, block_group, &gd_bh);
	if (!desc) {
		err = -EIO;
		goto error_return;
	}

	if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
	    in_range(ext4_inode_bitmap(sb, desc), block, count) ||
	    in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
	    in_range(block + count - 1, ext4_inode_table(sb, desc),
		     sbi->s_itb_per_group)) {
		ext4_error(sb, "Adding blocks in system zones - "
			   "Block = %llu, count = %lu",
			   block, count);
		err = -EINVAL;
		goto error_return;
	}

	BUFFER_TRACE(bitmap_bh, "getting write access");
	err = ext4_journal_get_write_access(handle, bitmap_bh);
	if (err)
		goto error_return;

	/*
	 * We are about to modify some metadata. Call the journal APIs
	 * to unshare ->b_data if a currently-committing transaction is
	 * using it
	 */
	BUFFER_TRACE(gd_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, gd_bh);
	if (err)
		goto error_return;

	for (i = 0, blocks_freed = 0; i < count; i++) {
		BUFFER_TRACE(bitmap_bh, "clear bit");
		if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
			ext4_error(sb, "bit already cleared for block %llu",
				   (ext4_fsblk_t)(block + i));
			BUFFER_TRACE(bitmap_bh, "bit already cleared");
		} else {
			blocks_freed++;
		}
	}
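	/*
	 * Only bits that were actually set count toward blocks_freed;
	 * a bit found already clear is reported via ext4_error() above
	 * but skipped, keeping the free-block accounting consistent.
	 */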

	err = ext4_mb_load_buddy(sb, block_group, &e4b);
	if (err)
		goto error_return;

	/*
	 * We need to update group_info->bb_free and the bitmap
	 * with the group lock held; generate_buddy looks at
	 * them with the group lock held.
	 */
	ext4_lock_group(sb, block_group);
	mb_clear_bits(bitmap_bh->b_data, bit, count);
	mb_free_blocks(NULL, &e4b, bit, count);
	blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc);
	ext4_free_group_clusters_set(sb, desc, blk_free_count);
	ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh,
				   EXT4_BLOCKS_PER_GROUP(sb) / 8);
	ext4_group_desc_csum_set(sb, block_group, desc);
	ext4_unlock_group(sb, block_group);
	percpu_counter_add(&sbi->s_freeclusters_counter,
			   EXT4_B2C(sbi, blocks_freed));

	if (sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
		atomic_add(EXT4_B2C(sbi, blocks_freed),
			   &sbi->s_flex_groups[flex_group].free_clusters);
	}

	ext4_mb_unload_buddy(&e4b);

	/* We dirtied the bitmap block */
	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);

	/* And the group descriptor block */
	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
	if (!err)
		err = ret;

error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, err);
	return err;
}

/**
 * ext4_trim_extent -- function to TRIM one single free extent in the group
 * @sb: super block for the file system
 * @start: starting block of the free extent in the alloc. group
 * @count: number of blocks to TRIM
 * @group: alloc. group we are working with
 * @e4b: ext4 buddy for the group
 *
 * Trim "count" blocks starting at "start" in the "group". To assure that no
 * one will allocate those blocks, mark them as used in the buddy bitmap.
 * This must be called under the group lock.
 */
static void ext4_trim_extent(struct super_block *sb, int start, int count,
			     ext4_group_t group, struct ext4_buddy *e4b)
{
	struct ext4_free_extent ex;

	trace_ext4_trim_extent(sb, group, start, count);

	assert_spin_locked(ext4_group_lock_ptr(sb, group));

	ex.fe_start = start;
	ex.fe_group = group;
	ex.fe_len = count;

	/*
	 * Mark blocks used, so no one can reuse them while
	 * being trimmed.
	 */
	mb_mark_used(e4b, &ex);
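	/*
	 * The group lock is dropped around the discard below: issuing
	 * a discard to the device may sleep, which is not allowed under
	 * a spinlock. mb_mark_used() above keeps the range safe from
	 * concurrent allocation in the meantime.
	 */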
	ext4_unlock_group(sb, group);
	ext4_issue_discard(sb, group, start, count);
	ext4_lock_group(sb, group);
	mb_free_blocks(NULL, e4b, start, ex.fe_len);
}

/**
 * ext4_trim_all_free -- function to trim all free space in alloc. group
 * @sb: super block for file system
 * @group: group to be trimmed
 * @start: first group block to examine
 * @max: last group block to examine
 * @minblocks: minimum extent block count
 *
 * ext4_trim_all_free walks through the group's buddy bitmap searching for
 * free extents. When a free extent is found, it is marked as used in the
 * group buddy bitmap and ext4_trim_extent is called to TRIM it; the extent
 * is then freed again in the buddy bitmap. This is done until the whole
 * group is scanned.
 */
static ext4_grpblk_t
ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
		   ext4_grpblk_t start, ext4_grpblk_t max,
		   ext4_grpblk_t minblocks)
{
	void *bitmap;
	ext4_grpblk_t next, count = 0, free_count = 0;
	struct ext4_buddy e4b;
	int ret;

	trace_ext4_trim_all_free(sb, group, start, max);

	ret = ext4_mb_load_buddy(sb, group, &e4b);
	if (ret) {
		ext4_error(sb, "Error in loading buddy "
			   "information for %u", group);
		return ret;
	}
	bitmap = e4b.bd_bitmap;

	ext4_lock_group(sb, group);
	if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) &&
	    minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks))
		goto out;

	start = (e4b.bd_info->bb_first_free > start) ?
		e4b.bd_info->bb_first_free : start;

	while (start <= max) {
		start = mb_find_next_zero_bit(bitmap, max + 1, start);
		if (start > max)
			break;
		next = mb_find_next_bit(bitmap, max + 1, start);

		if ((next - start) >= minblocks) {
			ext4_trim_extent(sb, start,
					 next - start, group, &e4b);
			count += next - start;
		}
		free_count += next - start;
		start = next + 1;

		if (fatal_signal_pending(current)) {
			count = -ERESTARTSYS;
			break;
		}

		if (need_resched()) {
			ext4_unlock_group(sb, group);
			cond_resched();
			ext4_lock_group(sb, group);
		}
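		/*
		 * Each iteration handles one free run: a zero bit opens
		 * the extent, the next set bit closes it. Cycling the
		 * group lock around cond_resched() above keeps long
		 * scans from monopolizing the CPU with the lock held.
		 */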

		if ((e4b.bd_info->bb_free - free_count) < minblocks)
			break;
	}

	if (!ret)
		EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
out:
	ext4_unlock_group(sb, group);
	ext4_mb_unload_buddy(&e4b);

	ext4_debug("trimmed %d blocks in the group %d\n",
		   count, group);

	return count;
}

/**
 * ext4_trim_fs() -- trim ioctl handler function
 * @sb: superblock for filesystem
 * @range: fstrim_range structure
 *
 * start: first byte to trim
 * len: number of bytes to trim from start
 * minlen: minimum extent length in bytes
 * ext4_trim_fs goes through all allocation groups containing bytes from
 * start to start+len. For each such group, ext4_trim_all_free is invoked
 * to trim all free space.
 */
int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
{
	struct ext4_group_info *grp;
	ext4_group_t group, first_group, last_group;
	ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
	uint64_t start, end, minlen, trimmed = 0;
	ext4_fsblk_t first_data_blk =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
	int ret = 0;

	start = range->start >> sb->s_blocksize_bits;
	end = start + (range->len >> sb->s_blocksize_bits) - 1;
	minlen = range->minlen >> sb->s_blocksize_bits;

	if (unlikely(minlen > EXT4_CLUSTERS_PER_GROUP(sb)) ||
	    unlikely(start >= max_blks))
		return -EINVAL;
	if (end >= max_blks)
		end = max_blks - 1;
	if (end <= first_data_blk)
		goto out;
	if (start < first_data_blk)
		start = first_data_blk;

	/* Determine first and last group to examine based on start and end */
	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
				     &first_group, &first_cluster);
	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
				     &last_group, &last_cluster);

	/* end now represents the last cluster to discard in this group */
	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
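	/*
	 * Example: byte offsets are converted to blocks via
	 * s_blocksize_bits; with 4k blocks, range->start of 1 GiB gives
	 * start block 262144. The loop below then trims
	 * [first_cluster, end] in the first group, [0, end] in middle
	 * groups, and [0, last_cluster] in the last group.
	 */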

	for (group = first_group; group <= last_group; group++) {
		grp = ext4_get_group_info(sb, group);
		/* We only do this if the grp has never been initialized */
		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
			ret = ext4_mb_init_group(sb, group);
			if (ret)
				break;
		}

		/*
		 * For all the groups except the last one, last cluster will
		 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
		 * change it for the last group, note that last_cluster is
		 * already computed earlier by ext4_get_group_no_and_offset()
		 */
		if (group == last_group)
			end = last_cluster;

		if (grp->bb_free >= minlen) {
			cnt = ext4_trim_all_free(sb, group, first_cluster,
						 end, minlen);
			if (cnt < 0) {
				ret = cnt;
				break;
			}
			trimmed += cnt;
		}

		/*
		 * For every group except the first one, we are sure
		 * that the first cluster to discard will be cluster #0.
		 */
		first_cluster = 0;
	}

	if (!ret)
		atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen);

out:
	range->len = trimmed * sb->s_blocksize;
	return ret;
}