GFS2: Cache the most recently used resource group in the inode
This means that after the initial allocation for any inode, the
last used resource group is cached in the inode for future use.
This drastically reduces the number of lookups of resource
groups in the common case, and thus the contention on that
data structure.
The allocation algorithm is the same as before, except that we
first check whether the goal block falls within the cached rgrp
before going to the rbtree to look one up.
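
For reference, the fast path now looks roughly like this (a sketch of
the get_local_rgrp() change in the diff below; rgrp_contains_block()
and gfs2_blk2rgrpd() are the existing helpers, the comments are
illustrative):

	if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal))
		rgd = begin = ip->i_rgd;	/* cache hit: reuse last rgrp, skip rbtree */
	else
		rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal);	/* cache miss: rbtree lookup */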
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 88d5b75..5bfb970 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -882,24 +882,21 @@
/**
* try_rgrp_fit - See if a given reservation will fit in a given RG
* @rgd: the RG data
- * @al: the struct gfs2_alloc structure describing the reservation
+ * @ip: the inode
*
* If there's room for the requested blocks to be allocated from the RG:
- * Sets the $al_rgd field in @al.
*
* Returns: 1 on success (it fits), 0 on failure (it doesn't fit)
*/
-static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al)
+static int try_rgrp_fit(const struct gfs2_rgrpd *rgd, const struct gfs2_inode *ip)
{
+ const struct gfs2_alloc *al = ip->i_alloc;
+
if (rgd->rd_flags & (GFS2_RGF_NOALLOC | GFS2_RDF_ERROR))
return 0;
-
- if (rgd->rd_free_clone >= al->al_requested) {
- al->al_rgd = rgd;
+ if (rgd->rd_free_clone >= al->al_requested)
return 1;
- }
-
return 0;
}
@@ -985,7 +982,10 @@
int error, rg_locked;
int loops = 0;
- rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal);
+ if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal))
+ rgd = begin = ip->i_rgd;
+ else
+ rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal);
if (rgd == NULL)
return -EBADSLT;
@@ -1002,8 +1002,10 @@
}
switch (error) {
case 0:
- if (try_rgrp_fit(rgd, al))
+ if (try_rgrp_fit(rgd, ip)) {
+ ip->i_rgd = rgd;
return 0;
+ }
if (rgd->rd_flags & GFS2_RDF_CHECK)
try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr);
if (!rg_locked)
@@ -1014,7 +1016,6 @@
if (rgd == begin)
loops++;
break;
-
default:
return error;
}
@@ -1042,21 +1043,20 @@
if (gfs2_assert_warn(sdp, al->al_requested))
return -EINVAL;
-try_again:
do {
error = get_local_rgrp(ip, &last_unlinked);
- /* If there is no space, flushing the log may release some */
- if (error) {
- if (ip == GFS2_I(sdp->sd_rindex) &&
- !sdp->sd_rindex_uptodate) {
- error = gfs2_ri_update(ip);
- if (error)
- return error;
- goto try_again;
- }
- gfs2_log_flush(sdp, NULL);
+ if (error != -ENOSPC)
+ break;
+ /* Check that fs hasn't grown if writing to rindex */
+ if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
+ error = gfs2_ri_update(ip);
+ if (error)
+ break;
+ continue;
}
- } while (error && tries++ < 3);
+ /* Flushing the log may release space */
+ gfs2_log_flush(sdp, NULL);
+ } while (tries++ < 3);
if (error)
return error;
@@ -1086,7 +1086,6 @@
al->al_alloced, al->al_requested, al->al_file,
al->al_line);
- al->al_rgd = NULL;
if (al->al_rgd_gh.gh_gl)
gfs2_glock_dq_uninit(&al->al_rgd_gh);
}
@@ -1339,7 +1338,7 @@
if (al == NULL)
return -ECANCELED;
- rgd = al->al_rgd;
+ rgd = ip->i_rgd;
if (rgrp_contains_block(rgd, ip->i_goal))
goal = ip->i_goal - rgd->rd_data0;
@@ -1398,7 +1397,7 @@
{
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
struct gfs2_alloc *al = dip->i_alloc;
- struct gfs2_rgrpd *rgd = al->al_rgd;
+ struct gfs2_rgrpd *rgd = dip->i_rgd;
u32 blk;
u64 block;
unsigned int n = 1;