[XFS] Add a KM_LARGE allocation flag and, in DEBUG builds, warn about
allocations larger than one page that are not marked with it.
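
As a usage sketch (illustrative only; xfs_example_alloc() and its sizing
are hypothetical and not part of this change), a caller that knows its
allocation can exceed PAGE_SIZE tags it with KM_LARGE so the DEBUG-only
check added to kmem_alloc() stays quiet:

    /*
     * Hypothetical caller, for illustration only.
     */
    static void *
    xfs_example_alloc(size_t nrecs)
    {
            /* may well exceed PAGE_SIZE for large nrecs */
            size_t size = nrecs * sizeof(uint64_t);

            return kmem_alloc(size, KM_SLEEP | KM_LARGE);
    }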
SGI-PV: 955302
SGI-Modid: xfs-linux-melb:xfs-kern:26800a
Signed-off-by: Nathan Scott <nathans@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
diff --git a/fs/xfs/linux-2.6/kmem.c b/fs/xfs/linux-2.6/kmem.c
index aba7fcf..f77fe5c 100644
--- a/fs/xfs/linux-2.6/kmem.c
+++ b/fs/xfs/linux-2.6/kmem.c
@@ -34,6 +34,14 @@
gfp_t lflags = kmem_flags_convert(flags);
void *ptr;
+#ifdef DEBUG
+ if (unlikely(!(flags & KM_LARGE) && (size > PAGE_SIZE))) {
+ printk(KERN_WARNING "Large %s attempt, size=%ld\n",
+ __FUNCTION__, (long)size);
+ dump_stack();
+ }
+#endif
+
do {
if (size < MAX_SLAB_SIZE || retries > MAX_VMALLOCS)
ptr = kmalloc(size, lflags);
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h
index 0e8293c..6d24274 100644
--- a/fs/xfs/linux-2.6/kmem.h
+++ b/fs/xfs/linux-2.6/kmem.h
@@ -30,6 +30,7 @@
#define KM_NOSLEEP 0x0002u
#define KM_NOFS 0x0004u
#define KM_MAYFAIL 0x0008u
+#define KM_LARGE 0x0010u
/*
* We use a special process flag to avoid recursive callbacks into
@@ -41,7 +42,7 @@
{
gfp_t lflags;
- BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL));
+ BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_LARGE));
if (flags & KM_NOSLEEP) {
lflags = GFP_ATOMIC | __GFP_NOWARN;
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 247adce..58b6599 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -768,7 +768,7 @@
_xfs_buf_initialize(bp, target, 0, len, 0);
try_again:
- data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL);
+ data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL | KM_LARGE);
if (unlikely(data == NULL))
goto fail_free_buf;
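
A note on intent, inferred from the hunks above: KM_LARGE is only
consulted by the new DEBUG check and accepted by the widened BUG_ON()
mask in kmem_flags_convert(); it does not alter the gfp flags passed on
to kmalloc() or vmalloc().  On a DEBUG kernel, a hypothetical unannotated
16KiB allocation would log something like

    Large kmem_alloc attempt, size=16384

followed by a dump_stack() backtrace pointing at the call site, which
should then either be annotated with KM_LARGE or reworked.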