UBI: create ubi_wl_entry slab on initialization

As with ltree_entry_slab, it makes more sense to create and destroy the
ubi_wl_entry slab cache once, at module initialization/exit time, rather
than when the first UBI device is attached and the last one is detached.

Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
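---

Note: only the wl.c part of the change is shown below. The counterpart
hunks — moving struct ubi_wl_entry and the ubi_wl_entry_slab declaration
into ubi.h, and creating/destroying the cache from the module init/exit
paths in build.c — are assumed to look roughly like the sketch here,
which mirrors the struct definition and the kmem_cache_create() call
removed from wl.c (the ubi_init()/ubi_exit() names follow the existing
ltree_entry_slab pattern):

	/* ubi.h (sketch): the entry struct moves out of wl.c */
	struct ubi_wl_entry {
		struct rb_node rb;
		int ec;
		int pnum;
	};

	extern struct kmem_cache *ubi_wl_entry_slab;

	/* build.c (sketch): one cache for all UBI devices, created
	 * once at module load instead of on first device attach */
	struct kmem_cache *ubi_wl_entry_slab;

	static int __init ubi_init(void)
	{
		/* ... other initialization ... */
		ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
						sizeof(struct ubi_wl_entry),
						0, 0, NULL);
		if (!ubi_wl_entry_slab)
			return -ENOMEM;
		return 0;
	}

	static void __exit ubi_exit(void)
	{
		/* ... other teardown ... */
		kmem_cache_destroy(ubi_wl_entry_slab);
	}

This also removes the ubi_devices_cnt bookkeeping from the attach/detach
paths, so cache lifetime no longer depends on device attach order.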
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 6330c8c..a405d40 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -117,21 +117,6 @@
 #define WL_MAX_FAILURES 32
 
 /**
- * struct ubi_wl_entry - wear-leveling entry.
- * @rb: link in the corresponding RB-tree
- * @ec: erase counter
- * @pnum: physical eraseblock number
- *
- * Each physical eraseblock has a corresponding &struct wl_entry object which
- * may be kept in different RB-trees.
- */
-struct ubi_wl_entry {
-	struct rb_node rb;
-	int ec;
-	int pnum;
-};
-
-/**
  * struct ubi_wl_prot_entry - PEB protection entry.
  * @rb_pnum: link in the @wl->prot.pnum RB-tree
  * @rb_aec: link in the @wl->prot.aec RB-tree
@@ -216,9 +201,6 @@
 #define paranoid_check_in_wl_tree(e, root)
 #endif
 
-/* Slab cache for wear-leveling entries */
-static struct kmem_cache *wl_entries_slab;
-
 /**
  * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
  * @e: the wear-leveling entry to add
@@ -878,14 +860,14 @@
 		dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
 		err = schedule_erase(ubi, e2, 0);
 		if (err) {
-			kmem_cache_free(wl_entries_slab, e2);
+			kmem_cache_free(ubi_wl_entry_slab, e2);
 			ubi_ro_mode(ubi);
 		}
 	}
 
 	err = schedule_erase(ubi, e1, 0);
 	if (err) {
-		kmem_cache_free(wl_entries_slab, e1);
+		kmem_cache_free(ubi_wl_entry_slab, e1);
 		ubi_ro_mode(ubi);
 	}
 
@@ -920,14 +902,14 @@
 		dbg_wl("PEB %d was put meanwhile, erase", e1->pnum);
 		err = schedule_erase(ubi, e1, 0);
 		if (err) {
-			kmem_cache_free(wl_entries_slab, e1);
+			kmem_cache_free(ubi_wl_entry_slab, e1);
 			ubi_ro_mode(ubi);
 		}
 	}
 
 	err = schedule_erase(ubi, e2, 0);
 	if (err) {
-		kmem_cache_free(wl_entries_slab, e2);
+		kmem_cache_free(ubi_wl_entry_slab, e2);
 		ubi_ro_mode(ubi);
 	}
 
@@ -1020,7 +1002,7 @@
 	if (cancel) {
 		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
 		kfree(wl_wrk);
-		kmem_cache_free(wl_entries_slab, e);
+		kmem_cache_free(ubi_wl_entry_slab, e);
 		return 0;
 	}
 
@@ -1049,7 +1031,7 @@
 
 	ubi_err("failed to erase PEB %d, error %d", pnum, err);
 	kfree(wl_wrk);
-	kmem_cache_free(wl_entries_slab, e);
+	kmem_cache_free(ubi_wl_entry_slab, e);
 
 	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
 	    err == -EBUSY) {
@@ -1294,7 +1276,7 @@
 					rb->rb_right = NULL;
 			}
 
-			kmem_cache_free(wl_entries_slab, e);
+			kmem_cache_free(ubi_wl_entry_slab, e);
 		}
 	}
 }
@@ -1407,14 +1389,6 @@
 		return err;
 	}
 
-	if (ubi_devices_cnt == 0) {
-		wl_entries_slab = kmem_cache_create("ubi_wl_entry_slab",
-						    sizeof(struct ubi_wl_entry),
-						    0, 0, NULL);
-		if (!wl_entries_slab)
-			return -ENOMEM;
-	}
-
 	err = -ENOMEM;
 	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
 	if (!ubi->lookuptbl)
@@ -1423,7 +1397,7 @@
 	list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
 		cond_resched();
 
-		e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 		if (!e)
 			goto out_free;
 
@@ -1431,7 +1405,7 @@
 		e->ec = seb->ec;
 		ubi->lookuptbl[e->pnum] = e;
 		if (schedule_erase(ubi, e, 0)) {
-			kmem_cache_free(wl_entries_slab, e);
+			kmem_cache_free(ubi_wl_entry_slab, e);
 			goto out_free;
 		}
 	}
@@ -1439,7 +1413,7 @@
 	list_for_each_entry(seb, &si->free, u.list) {
 		cond_resched();
 
-		e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 		if (!e)
 			goto out_free;
 
@@ -1453,7 +1427,7 @@
 	list_for_each_entry(seb, &si->corr, u.list) {
 		cond_resched();
 
-		e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 		if (!e)
 			goto out_free;
 
@@ -1461,7 +1435,7 @@
 		e->ec = seb->ec;
 		ubi->lookuptbl[e->pnum] = e;
 		if (schedule_erase(ubi, e, 0)) {
-			kmem_cache_free(wl_entries_slab, e);
+			kmem_cache_free(ubi_wl_entry_slab, e);
 			goto out_free;
 		}
 	}
@@ -1470,7 +1444,7 @@
 		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
 			cond_resched();
 
-			e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 			if (!e)
 				goto out_free;
 
@@ -1510,8 +1484,6 @@
 	tree_destroy(&ubi->free);
 	tree_destroy(&ubi->scrub);
 	kfree(ubi->lookuptbl);
-	if (ubi_devices_cnt == 0)
-		kmem_cache_destroy(wl_entries_slab);
 	return err;
 }
 
@@ -1541,7 +1513,7 @@
 					rb->rb_right = NULL;
 			}
 
-			kmem_cache_free(wl_entries_slab, pe->e);
+			kmem_cache_free(ubi_wl_entry_slab, pe->e);
 			kfree(pe);
 		}
 	}
@@ -1565,8 +1537,6 @@
 	tree_destroy(&ubi->free);
 	tree_destroy(&ubi->scrub);
 	kfree(ubi->lookuptbl);
-	if (ubi_devices_cnt == 1)
-		kmem_cache_destroy(wl_entries_slab);
 }
 
 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID