Initial Contribution

msm-2.6.38: tag AU_LINUX_ANDROID_GINGERBREAD.02.03.04.00.142

Signed-off-by: Bryan Huntsman <bryanh@codeaurora.org>
diff --git a/lib/Makefile b/lib/Makefile
index 6b597fd..d1f8ea2 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -12,7 +12,7 @@
 	 idr.o int_sqrt.o extable.o prio_tree.o \
 	 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
 	 proportions.o prio_heap.o ratelimit.o show_mem.o \
-	 is_single_threaded.o plist.o decompress.o find_next_bit.o
+	 is_single_threaded.o plist.o decompress.o find_next_bit.o memory_alloc.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 3f3b681..cf12bb8 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -315,30 +315,33 @@
 }
 EXPORT_SYMBOL(bitmap_clear);
 
-/*
+/**
  * bitmap_find_next_zero_area - find a contiguous aligned zero area
  * @map: The address to base the search on
  * @size: The bitmap size in bits
  * @start: The bitnumber to start searching at
  * @nr: The number of zeroed bits we're looking for
  * @align_mask: Alignment mask for zero area
+ * @align_offset: Alignment offset for zero area.
  *
  * The @align_mask should be one less than a power of 2; the effect is that
- * the bit offset of all zero areas this function finds is multiples of that
- * power of 2. A @align_mask of 0 means no alignment is required.
+ * the bit offset of all zero areas this function finds plus @align_offset
+ * is a multiple of that power of 2.  An @align_mask of 0 means no
+ * alignment is required.
  */
-unsigned long bitmap_find_next_zero_area(unsigned long *map,
-					 unsigned long size,
-					 unsigned long start,
-					 unsigned int nr,
-					 unsigned long align_mask)
+unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
+					     unsigned long size,
+					     unsigned long start,
+					     unsigned int nr,
+					     unsigned long align_mask,
+					     unsigned long align_offset)
 {
 	unsigned long index, end, i;
 again:
 	index = find_next_zero_bit(map, size, start);
 
 	/* Align allocation */
-	index = __ALIGN_MASK(index, align_mask);
+	index = __ALIGN_MASK(index + align_offset, align_mask) - align_offset;
 
 	end = index + nr;
 	if (end > size)
@@ -350,7 +353,7 @@
 	}
 	return index;
 }
-EXPORT_SYMBOL(bitmap_find_next_zero_area);
+EXPORT_SYMBOL(bitmap_find_next_zero_area_off);
 
 /*
  * Bitmap printing & parsing functions: first version by Bill Irwin,
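
For reference, a minimal sketch of a caller of the new _off variant.  With
align_mask = 3 and a non-zero align_offset, the index returned plus the
offset is a multiple of 4.  The wrapper function, the 16-bit run length and
the error convention are illustrative, not part of this patch:

#include <linux/bitmap.h>
#include <linux/errno.h>

/* reserve a 16-bit zero run so that (start + base_bit) is 4-bit aligned */
static int reserve_aligned_run(unsigned long *map, unsigned long nbits,
			       unsigned long base_bit)
{
	unsigned long start;

	start = bitmap_find_next_zero_area_off(map, nbits, 0, 16, 3, base_bit);
	if (start >= nbits)
		return -ENOMEM;	/* no suitable zero area */

	bitmap_set(map, start, 16);
	return start;
}
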
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index a78b7c6..b07b5b8 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -563,6 +563,39 @@
 }
 
 /**
+ * debug_object_assert_init - debug checks when an object should be initialized
+ * @addr:	address of the object
+ * @descr:	pointer to an object specific debug description structure
+ */
+void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
+{
+	struct debug_bucket *db;
+	struct debug_obj *obj;
+	unsigned long flags;
+
+	if (!debug_objects_enabled)
+		return;
+
+	db = get_bucket((unsigned long) addr);
+
+	raw_spin_lock_irqsave(&db->lock, flags);
+
+	obj = lookup_object(addr, db);
+	if (!obj) {
+		raw_spin_unlock_irqrestore(&db->lock, flags);
+		/*
+		 * Maybe the object is static.  Let the type specific
+		 * code decide what to do.
+		 */
+		debug_object_fixup(descr->fixup_assert_init, addr,
+				   ODEBUG_STATE_NOTAVAILABLE);
+		return;
+	}
+
+	raw_spin_unlock_irqrestore(&db->lock, flags);
+}
+
+/**
  * debug_object_active_state - debug checks object usage state machine
  * @addr:	address of the object
  * @descr:	pointer to an object specific debug description structure
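
A minimal sketch of how a subsystem might wire up the new hook, assuming the
matching fixup_assert_init member added to struct debug_obj_descr by this
series; the my_obj names are illustrative, not part of this patch:

#include <linux/debugobjects.h>

struct my_obj { int state; /* ... */ };

static struct debug_obj_descr my_debug_descr;

static int my_fixup_assert_init(void *addr, enum debug_obj_state state)
{
	struct my_obj *obj = addr;

	if (state == ODEBUG_STATE_NOTAVAILABLE) {
		/* e.g. a statically allocated object: start tracking it */
		debug_object_init(obj, &my_debug_descr);
		return 1;	/* fixup done */
	}
	return 0;
}

static struct debug_obj_descr my_debug_descr = {
	.name			= "my_obj",
	.fixup_assert_init	= my_fixup_assert_init,
};

void my_obj_disable(struct my_obj *obj)
{
	/* warns (and lets the fixup run) if @obj was never initialized */
	debug_object_assert_init(obj, &my_debug_descr);
	/* ... proceed to disable @obj ... */
}
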
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 577ddf8..c7b9b9c 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -16,23 +16,45 @@
 #include <linux/genalloc.h>
 
 
+/* General purpose special memory pool descriptor. */
+struct gen_pool {
+	rwlock_t lock;			/* protects chunks list */
+	struct list_head chunks;	/* list of chunks in this pool */
+	unsigned order;			/* minimum allocation order */
+};
+
+/* General purpose special memory pool chunk descriptor. */
+struct gen_pool_chunk {
+	spinlock_t lock;		/* protects bits */
+	struct list_head next_chunk;	/* next chunk in pool */
+	phys_addr_t phys_addr;		/* physical starting address of memory chunk */
+	unsigned long start;		/* start of memory chunk */
+	unsigned long size;		/* number of bits */
+	unsigned long bits[0];		/* bitmap for allocating memory chunk */
+};
+
 /**
- * gen_pool_create - create a new special memory pool
- * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
- * @nid: node id of the node the pool structure should be allocated on, or -1
+ * gen_pool_create() - create a new special memory pool
+ * @order:	Log base 2 of number of bytes each bitmap bit
+ *		represents.
+ * @nid:	Node id of the node the pool structure should be allocated
+ *		on, or -1.  It will also be used for other allocations.
  *
  * Create a new special memory pool that can be used to manage special purpose
  * memory not managed by the regular kmalloc/kfree interface.
  */
-struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
+struct gen_pool *__must_check gen_pool_create(unsigned order, int nid)
 {
 	struct gen_pool *pool;
 
-	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
-	if (pool != NULL) {
+	if (WARN_ON(order >= BITS_PER_LONG))
+		return NULL;
+
+	pool = kmalloc_node(sizeof *pool, GFP_KERNEL, nid);
+	if (pool) {
 		rwlock_init(&pool->lock);
 		INIT_LIST_HEAD(&pool->chunks);
-		pool->min_alloc_order = min_alloc_order;
+		pool->order = order;
 	}
 	return pool;
 }
@@ -51,22 +73,29 @@
  *
  * Returns 0 on success or a -ve errno on failure.
  */
-int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
+int __must_check gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
 		 size_t size, int nid)
 {
 	struct gen_pool_chunk *chunk;
-	int nbits = size >> pool->min_alloc_order;
-	int nbytes = sizeof(struct gen_pool_chunk) +
-				(nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
+	size_t nbytes;
 
-	chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
-	if (unlikely(chunk == NULL))
+	if (WARN_ON(!virt || virt + size < virt ||
+	    (virt & ((1UL << pool->order) - 1))))
+		return -EINVAL;
+
+	size = size >> pool->order;
+	if (WARN_ON(!size))
+		return -EINVAL;
+
+	nbytes = sizeof *chunk + BITS_TO_LONGS(size) * sizeof *chunk->bits;
+	chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);
+	if (!chunk)
 		return -ENOMEM;
 
 	spin_lock_init(&chunk->lock);
 	chunk->phys_addr = phys;
-	chunk->start_addr = virt;
-	chunk->end_addr = virt + size;
+	chunk->start = virt >> pool->order;
+	chunk->size  = size;
 
 	write_lock(&pool->lock);
 	list_add(&chunk->next_chunk, &pool->chunks);
@@ -92,8 +121,10 @@
 	list_for_each(_chunk, &pool->chunks) {
 		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
 
-		if (addr >= chunk->start_addr && addr < chunk->end_addr)
-			return chunk->phys_addr + addr - chunk->start_addr;
+		if ((addr >> pool->order) >= chunk->start &&
+		    (addr >> pool->order) < chunk->start + chunk->size)
+			return chunk->phys_addr + addr -
+				(chunk->start << pool->order);
 	}
 	read_unlock(&pool->lock);
 
@@ -102,115 +133,116 @@
 EXPORT_SYMBOL(gen_pool_virt_to_phys);
 
 /**
- * gen_pool_destroy - destroy a special memory pool
- * @pool: pool to destroy
+ * gen_pool_destroy() - destroy a special memory pool
+ * @pool:	Pool to destroy.
  *
  * Destroy the specified special memory pool. Verifies that there are no
  * outstanding allocations.
  */
 void gen_pool_destroy(struct gen_pool *pool)
 {
-	struct list_head *_chunk, *_next_chunk;
 	struct gen_pool_chunk *chunk;
-	int order = pool->min_alloc_order;
-	int bit, end_bit;
+	unsigned long bit;
 
-
-	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
-		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+	while (!list_empty(&pool->chunks)) {
+		chunk = list_entry(pool->chunks.next, struct gen_pool_chunk,
+				   next_chunk);
 		list_del(&chunk->next_chunk);
 
-		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
-		bit = find_next_bit(chunk->bits, end_bit, 0);
-		BUG_ON(bit < end_bit);
+		bit = find_next_bit(chunk->bits, chunk->size, 0);
+		BUG_ON(bit < chunk->size);
 
 		kfree(chunk);
 	}
 	kfree(pool);
-	return;
 }
 EXPORT_SYMBOL(gen_pool_destroy);
 
 /**
- * gen_pool_alloc - allocate special memory from the pool
- * @pool: pool to allocate from
- * @size: number of bytes to allocate from the pool
+ * gen_pool_alloc_aligned() - allocate special memory from the pool
+ * @pool:	Pool to allocate from.
+ * @size:	Number of bytes to allocate from the pool.
+ * @alignment_order:	Order the allocated space should be
+ *			aligned to (e.g. 20 means allocated space
+ *			must be aligned to 1MiB).
  *
  * Allocate the requested number of bytes from the specified pool.
  * Uses a first-fit algorithm.
  */
-unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
+unsigned long __must_check
+gen_pool_alloc_aligned(struct gen_pool *pool, size_t size,
+		       unsigned alignment_order)
 {
-	struct list_head *_chunk;
+	unsigned long addr, align_mask = 0, flags, start;
 	struct gen_pool_chunk *chunk;
-	unsigned long addr, flags;
-	int order = pool->min_alloc_order;
-	int nbits, start_bit, end_bit;
 
 	if (size == 0)
 		return 0;
 
-	nbits = (size + (1UL << order) - 1) >> order;
+	if (alignment_order > pool->order)
+		align_mask = (1UL << (alignment_order - pool->order)) - 1;
+
+	size = (size + (1UL << pool->order) - 1) >> pool->order;
 
 	read_lock(&pool->lock);
-	list_for_each(_chunk, &pool->chunks) {
-		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
-
-		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
+	list_for_each_entry(chunk, &pool->chunks, next_chunk) {
+		if (chunk->size < size)
+			continue;
 
 		spin_lock_irqsave(&chunk->lock, flags);
-		start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0,
-						nbits, 0);
-		if (start_bit >= end_bit) {
+		start = bitmap_find_next_zero_area_off(chunk->bits, chunk->size,
+						       0, size, align_mask,
+						       chunk->start);
+		if (start >= chunk->size) {
 			spin_unlock_irqrestore(&chunk->lock, flags);
 			continue;
 		}
 
-		addr = chunk->start_addr + ((unsigned long)start_bit << order);
-
-		bitmap_set(chunk->bits, start_bit, nbits);
+		bitmap_set(chunk->bits, start, size);
 		spin_unlock_irqrestore(&chunk->lock, flags);
-		read_unlock(&pool->lock);
-		return addr;
+		addr = (chunk->start + start) << pool->order;
+		goto done;
 	}
+
+	addr = 0;
+done:
 	read_unlock(&pool->lock);
-	return 0;
+	return addr;
 }
-EXPORT_SYMBOL(gen_pool_alloc);
+EXPORT_SYMBOL(gen_pool_alloc_aligned);
 
 /**
- * gen_pool_free - free allocated special memory back to the pool
- * @pool: pool to free to
- * @addr: starting address of memory to free back to pool
- * @size: size in bytes of memory to free
+ * gen_pool_free() - free allocated special memory back to the pool
+ * @pool:	Pool to free to.
+ * @addr:	Starting address of memory to free back to pool.
+ * @size:	Size in bytes of memory to free.
  *
  * Free previously allocated special memory back to the specified pool.
  */
 void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
 {
-	struct list_head *_chunk;
 	struct gen_pool_chunk *chunk;
 	unsigned long flags;
-	int order = pool->min_alloc_order;
-	int bit, nbits;
 
-	nbits = (size + (1UL << order) - 1) >> order;
+	if (!size)
+		return;
+
+	addr = addr >> pool->order;
+	size = (size + (1UL << pool->order) - 1) >> pool->order;
+
+	BUG_ON(addr + size < addr);
 
 	read_lock(&pool->lock);
-	list_for_each(_chunk, &pool->chunks) {
-		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
-
-		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
-			BUG_ON(addr + size > chunk->end_addr);
+	list_for_each_entry(chunk, &pool->chunks, next_chunk)
+		if (addr >= chunk->start &&
+		    addr + size <= chunk->start + chunk->size) {
 			spin_lock_irqsave(&chunk->lock, flags);
-			bit = (addr - chunk->start_addr) >> order;
-			while (nbits--)
-				__clear_bit(bit++, chunk->bits);
+			bitmap_clear(chunk->bits, addr - chunk->start, size);
 			spin_unlock_irqrestore(&chunk->lock, flags);
-			break;
+			goto done;
 		}
-	}
-	BUG_ON(nbits > 0);
+	BUG_ON(1);
+done:
 	read_unlock(&pool->lock);
 }
 EXPORT_SYMBOL(gen_pool_free);
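
A minimal sketch of the reworked genalloc API from a caller's point of view.
Note that gen_pool_alloc_aligned() passes chunk->start as the align_offset,
so the alignment applies to the address that is returned, not to the offset
within the chunk.  Addresses and sizes are illustrative, not part of this
patch:

#include <linux/genalloc.h>
#include <linux/errno.h>

static int genpool_example(void)
{
	struct gen_pool *pool;
	unsigned long addr;

	/* one bitmap bit per 4KiB (order 12), any node */
	pool = gen_pool_create(12, -1);
	if (!pool)
		return -ENOMEM;

	/* hand the pool 1MiB of special memory at 0x40000000 */
	if (gen_pool_add(pool, 0x40000000, 1 << 20, -1)) {
		gen_pool_destroy(pool);
		return -ENOMEM;
	}

	/* 64KiB, returned address aligned to 64KiB (order 16) */
	addr = gen_pool_alloc_aligned(pool, 1 << 16, 16);
	if (addr)
		gen_pool_free(pool, addr, 1 << 16);

	/* all allocations must be freed before the pool is destroyed */
	gen_pool_destroy(pool);
	return 0;
}
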
diff --git a/lib/memory_alloc.c b/lib/memory_alloc.c
new file mode 100644
index 0000000..2e020de
--- /dev/null
+++ b/lib/memory_alloc.c
@@ -0,0 +1,329 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/page.h>
+#include <linux/io.h>
+#include <linux/memory_alloc.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/log2.h>
+
+#define MAX_MEMPOOLS 8
+
+struct mem_pool mpools[MAX_MEMPOOLS];
+
+/* The tree contains all allocations over all memory pools */
+static struct rb_root alloc_root;
+static struct mutex alloc_mutex;
+
+static struct alloc *find_alloc(void *addr)
+{
+	struct rb_root *root = &alloc_root;
+	struct rb_node *p = root->rb_node;
+
+	mutex_lock(&alloc_mutex);
+
+	while (p) {
+		struct alloc *node;
+
+		node = rb_entry(p, struct alloc, rb_node);
+		if (addr < node->vaddr)
+			p = p->rb_left;
+		else if (addr > node->vaddr)
+			p = p->rb_right;
+		else {
+			mutex_unlock(&alloc_mutex);
+			return node;
+		}
+	}
+	mutex_unlock(&alloc_mutex);
+	return NULL;
+}
+
+static int add_alloc(struct alloc *node)
+{
+	struct rb_root *root = &alloc_root;
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
+
+	mutex_lock(&alloc_mutex);
+	while (*p) {
+		struct alloc *tmp;
+		parent = *p;
+
+		tmp = rb_entry(parent, struct alloc, rb_node);
+
+		if (node->vaddr < tmp->vaddr)
+			p = &(*p)->rb_left;
+		else if (node->vaddr > tmp->vaddr)
+			p = &(*p)->rb_right;
+		else {
+			WARN(1, "memory at %p already allocated\n", tmp->vaddr);
+			mutex_unlock(&alloc_mutex);
+			return -EINVAL;
+		}
+	}
+	rb_link_node(&node->rb_node, parent, p);
+	rb_insert_color(&node->rb_node, root);
+	mutex_unlock(&alloc_mutex);
+	return 0;
+}
+
+static int remove_alloc(struct alloc *victim_node)
+{
+	struct rb_root *root = &alloc_root;
+	if (!victim_node)
+		return -EINVAL;
+
+	mutex_lock(&alloc_mutex);
+	rb_erase(&victim_node->rb_node, root);
+	mutex_unlock(&alloc_mutex);
+	return 0;
+}
+
+static struct gen_pool *initialize_gpool(unsigned long start,
+	unsigned long size)
+{
+	struct gen_pool *gpool;
+
+	gpool = gen_pool_create(PAGE_SHIFT, -1);
+
+	if (!gpool)
+		return NULL;
+	if (gen_pool_add(gpool, start, size, -1)) {
+		gen_pool_destroy(gpool);
+		return NULL;
+	}
+
+	return gpool;
+}
+
+static void *__alloc(struct mem_pool *mpool, unsigned long size,
+	unsigned long align, int cached)
+{
+	unsigned long paddr;
+	void __iomem *vaddr;
+
+	unsigned long aligned_size;
+	int log_align = ilog2(align);
+
+	struct alloc *node;
+
+	aligned_size = PFN_ALIGN(size);
+	paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align);
+	if (!paddr)
+		return NULL;
+
+	node = kmalloc(sizeof(struct alloc), GFP_KERNEL);
+	if (!node)
+		goto out;
+
+	if (cached)
+		vaddr = ioremap_cached(paddr, aligned_size);
+	else
+		vaddr = ioremap(paddr, aligned_size);
+
+	if (!vaddr)
+		goto out_kfree;
+
+	node->vaddr = vaddr;
+	node->paddr = paddr;
+	node->len = aligned_size;
+	node->mpool = mpool;
+	if (add_alloc(node))
+		goto out_kfree;
+
+	mpool->free -= aligned_size;
+
+	return vaddr;
+out_kfree:
+	if (vaddr)
+		iounmap(vaddr);
+	kfree(node);
+out:
+	gen_pool_free(mpool->gpool, paddr, aligned_size);
+	return NULL;
+}
+
+static void __free(void *vaddr, bool unmap)
+{
+	struct alloc *node = find_alloc(vaddr);
+
+	if (!node)
+		return;
+
+	if (unmap)
+		iounmap(node->vaddr);
+
+	gen_pool_free(node->mpool->gpool, node->paddr, node->len);
+	node->mpool->free += node->len;
+
+	remove_alloc(node);
+	kfree(node);
+}
+
+static struct mem_pool *mem_type_to_memory_pool(int mem_type)
+{
+	struct mem_pool *mpool;
+
+	if (mem_type < 0 || mem_type >= MAX_MEMPOOLS)
+		return NULL;
+	mpool = &mpools[mem_type];
+	if (!mpool->size)
+		return NULL;
+
+	mutex_lock(&mpool->pool_mutex);
+	if (!mpool->gpool)
+		mpool->gpool = initialize_gpool(mpool->paddr, mpool->size);
+	mutex_unlock(&mpool->pool_mutex);
+	if (!mpool->gpool)
+		return NULL;
+
+	return mpool;
+}
+
+struct mem_pool *initialize_memory_pool(unsigned long start,
+	unsigned long size, int mem_type)
+{
+	int id = mem_type;
+
+	if (id < 0 || id >= MAX_MEMPOOLS || size <= PAGE_SIZE ||
+	    size % PAGE_SIZE)
+		return NULL;
+
+	mutex_lock(&mpools[id].pool_mutex);
+
+	mpools[id].paddr = start;
+	mpools[id].size = size;
+	mpools[id].free = size;
+	mutex_unlock(&mpools[id].pool_mutex);
+
+	pr_info("memory pool %d (start %lx size %lx) initialized\n",
+		id, start, size);
+	return &mpools[id];
+}
+EXPORT_SYMBOL_GPL(initialize_memory_pool);
+
+void *allocate_contiguous_memory(unsigned long size,
+	int mem_type, unsigned long align, int cached)
+{
+	unsigned long aligned_size = PFN_ALIGN(size);
+	struct mem_pool *mpool;
+
+	mpool = mem_type_to_memory_pool(mem_type);
+	if (!mpool)
+		return NULL;
+	return __alloc(mpool, aligned_size, align, cached);
+}
+EXPORT_SYMBOL_GPL(allocate_contiguous_memory);
+
+unsigned long allocate_contiguous_memory_nomap(unsigned long size,
+	int mem_type, unsigned long align)
+{
+	unsigned long paddr;
+	unsigned long aligned_size;
+
+	struct alloc *node;
+	struct mem_pool *mpool;
+	int log_align = ilog2(align);
+
+	mpool = mem_type_to_memory_pool(mem_type);
+	if (!mpool || !mpool->gpool)
+		return 0;
+
+	aligned_size = PFN_ALIGN(size);
+	paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align);
+	if (!paddr)
+		return 0;
+
+	node = kmalloc(sizeof(struct alloc), GFP_KERNEL);
+	if (!node)
+		goto out;
+
+	node->paddr = paddr;
+
+	/* We search the tree using node->vaddr, so set
+	 * it to something unique even though we don't
+	 * use it for physical allocation nodes.
+	 * The virtual and physical address ranges
+	 * are disjoint, so there won't be any chance of
+	 * a duplicate node->vaddr value.
+	 */
+	node->vaddr = (void *)paddr;
+	node->len = aligned_size;
+	node->mpool = mpool;
+	if (add_alloc(node))
+		goto out_kfree;
+
+	mpool->free -= aligned_size;
+	return paddr;
+out_kfree:
+	kfree(node);
+out:
+	gen_pool_free(mpool->gpool, paddr, aligned_size);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(allocate_contiguous_memory_nomap);
+
+void free_contiguous_memory(void *addr)
+{
+	if (!addr)
+		return;
+	__free(addr, true);
+}
+EXPORT_SYMBOL_GPL(free_contiguous_memory);
+
+void free_contiguous_memory_by_paddr(unsigned long paddr)
+{
+	if (!paddr)
+		return;
+	__free((void *)paddr, false);
+}
+EXPORT_SYMBOL_GPL(free_contiguous_memory_by_paddr);
+
+unsigned long memory_pool_node_paddr(void *vaddr)
+{
+	struct alloc *node = find_alloc(vaddr);
+
+	if (!node)
+		return -EINVAL;
+
+	return node->paddr;
+}
+EXPORT_SYMBOL_GPL(memory_pool_node_paddr);
+
+unsigned long memory_pool_node_len(void *vaddr)
+{
+	struct alloc *node = find_alloc(vaddr);
+
+	if (!node)
+		return -EINVAL;
+
+	return node->len;
+}
+EXPORT_SYMBOL_GPL(memory_pool_node_len);
+
+int __init memory_pool_init(void)
+{
+	int i;
+
+	alloc_root = RB_ROOT;
+	mutex_init(&alloc_mutex);
+	for (i = 0; i < ARRAY_SIZE(mpools); i++) {
+		mutex_init(&mpools[i].pool_mutex);
+		mpools[i].gpool = NULL;
+	}
+	return 0;
+}
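
And a minimal end-to-end sketch of the new mem_pool API, assuming
memory_pool_init() has already run from platform init code and that the
physical range below was carved out of the kernel's memory map at boot; the
base address, sizes and mem_type value are illustrative, not part of this
patch:

#include <linux/memory_alloc.h>
#include <linux/errno.h>

#define MY_MEM_TYPE	0	/* illustrative pool index, < MAX_MEMPOOLS */

static int mempool_example(void)
{
	void *vaddr;
	unsigned long paddr;

	/* 16MiB of reserved memory at 0x80000000, a PAGE_SIZE multiple */
	if (!initialize_memory_pool(0x80000000, 16 << 20, MY_MEM_TYPE))
		return -EINVAL;

	/* mapped (uncached), 1MiB long, 1MiB aligned */
	vaddr = allocate_contiguous_memory(1 << 20, MY_MEM_TYPE, 1 << 20, 0);
	if (!vaddr)
		return -ENOMEM;

	/* physical-only allocation with no kernel mapping */
	paddr = allocate_contiguous_memory_nomap(1 << 20, MY_MEM_TYPE, 1 << 20);
	if (paddr)
		free_contiguous_memory_by_paddr(paddr);

	free_contiguous_memory(vaddr);
	return 0;
}
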