/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
 */

/*
 * UBI wear-leveling unit.
 *
 * This unit is responsible for wear-leveling. It works in terms of physical
 * eraseblocks and erase counters and knows nothing about logical eraseblocks,
 * volumes, etc. From this unit's perspective all physical eraseblocks are of
 * two types - used and free. Used physical eraseblocks are those that were
 * "get" by the 'ubi_wl_get_peb()' function, and free physical eraseblocks are
 * those that were put by the 'ubi_wl_put_peb()' function.
 *
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
 * counter header. The rest of the physical eraseblock contains only 0xFF
 * bytes.
 *
 * When physical eraseblocks are returned to the WL unit by means of the
 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
 * done asynchronously in context of the per-UBI device background thread,
 * which is also managed by the WL unit.
 *
 * The wear-leveling is ensured by means of moving the contents of used
 * physical eraseblocks with low erase counter to free physical eraseblocks
 * with high erase counter.
 *
 * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick
 * an "optimal" physical eraseblock. For example, when it is known that the
 * physical eraseblock will be "put" soon because it contains short-term data,
 * the WL unit may pick a free physical eraseblock with low erase counter, and
 * so forth.
 *
 * If the WL unit fails to erase a physical eraseblock, it marks it as bad.
 *
 * This unit is also responsible for scrubbing. If a bit-flip is detected in a
 * physical eraseblock, it has to be moved. Technically this is the same as
 * moving it for wear-leveling reasons.
 *
 * As mentioned, from this unit's perspective all physical eraseblocks are
 * either "free" or "used". Free eraseblocks are kept in the @wl->free
 * RB-tree, while used eraseblocks are kept in a set of different RB-trees:
 * @wl->used, @wl->prot.pnum, @wl->prot.aec, and @wl->scrub.
 *
 * Note, in this implementation, we keep a small in-RAM object for each physical
 * eraseblock. This is surely not a scalable solution. But it appears to be good
 * enough for moderately large flashes and it is simple. In future, one may
 * re-work this unit and make it more scalable.
 *
 * At the moment this unit does not utilize the sequence number, which was
 * introduced relatively recently. But it would be wise to do this because the
 * sequence number of a logical eraseblock characterizes how old it is. For
 * example, when we move a PEB with low erase counter, and we need to pick the
 * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
 * pick a target PEB with an average EC if our PEB is not very "old". This is
 * room for future re-works of the WL unit.
 *
 * FIXME: looks too complex, should be simplified (later).
 */
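
/*
 * A minimal usage sketch (illustrative only, based on the interfaces
 * described above): a caller such as the EBA unit obtains a PEB with a data
 * type hint and later returns it, at which point the WL unit schedules the
 * erasure:
 *
 *	int pnum, err;
 *
 *	pnum = ubi_wl_get_peb(ubi, UBI_SHORTTERM);
 *	if (pnum < 0)
 *		return pnum;                 (e.g. -ENOSPC if no free PEBs)
 *	... write a VID header and data to PEB pnum via the I/O unit ...
 *	err = ubi_wl_put_peb(ubi, pnum, 0);  (0 - no torturing needed)
 */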

#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include "ubi.h"

/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * For how many erase cycles short term, unknown, and long term physical
 * eraseblocks are protected.
 */
#define ST_PROTECTION 16
#define U_PROTECTION  10
#define LT_PROTECTION 4
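
/*
 * Illustrative arithmetic (not part of the driver): if a short term PEB is
 * handed out while the absolute erase counter @ubi->abs_ec is 1000, it stays
 * in the protection trees until @ubi->abs_ec reaches 1000 + ST_PROTECTION =
 * 1016, i.e. for the next 16 erase operations on the whole UBI device.
 */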

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL unit starts moving data from used physical eraseblocks with
 * low erase counter to free physical eraseblocks with high erase counter.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is moved, the WL unit has to pick the target
 * physical eraseblock to move to. The simplest way would be just to pick the
 * one with the highest erase counter. But in certain workloads this could lead
 * to unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
 * situation when the picked physical eraseblock is constantly erased after the
 * data is written to it. So, we have a constant which limits the highest erase
 * counter of the free physical eraseblock to pick. Namely, the WL unit does
 * not pick eraseblocks with erase counter greater than the lowest erase
 * counter plus %WL_FREE_MAX_DIFF.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
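
/*
 * Illustrative arithmetic: with CONFIG_MTD_UBI_WL_THRESHOLD set to 4096 this
 * yields WL_FREE_MAX_DIFF = 8192, so if the least worn free PEB has erase
 * counter 100, free PEBs with erase counter 8292 or more are never picked as
 * wear-leveling targets.
 */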

/*
 * Maximum number of consecutive background thread failures which is enough to
 * switch to read-only mode.
 */
#define WL_MAX_FAILURES 32

/**
 * struct ubi_wl_entry - wear-leveling entry.
 * @rb: link in the corresponding RB-tree
 * @ec: erase counter
 * @pnum: physical eraseblock number
 *
 * Each physical eraseblock has a corresponding &struct ubi_wl_entry object
 * which may be kept in different RB-trees.
 */
struct ubi_wl_entry {
	struct rb_node rb;
	int ec;
	int pnum;
};

/**
 * struct ubi_wl_prot_entry - PEB protection entry.
 * @rb_pnum: link in the @wl->prot.pnum RB-tree
 * @rb_aec: link in the @wl->prot.aec RB-tree
 * @abs_ec: the absolute erase counter value when the protection ends
 * @e: the wear-leveling entry of the physical eraseblock under protection
 *
 * When the WL unit returns a physical eraseblock, the physical eraseblock is
 * protected from being moved for some "time". For this reason, the physical
 * eraseblock is not directly moved from the @wl->free tree to the @wl->used
 * tree. There is one more tree in between where this physical eraseblock is
 * temporarily stored (@wl->prot).
 *
 * All this protection stuff is needed because:
 *  o we don't want to move physical eraseblocks just after we have given them
 *    to the user; instead, we first want to let users fill them up with data;
 *
 *  o there is a chance that the user will put the physical eraseblock very
 *    soon, so it makes sense not to move it for some time, but wait; this is
 *    especially important in case of "short term" physical eraseblocks.
 *
 * Physical eraseblocks stay protected only for a limited time. But the "time"
 * is measured in erase cycles in this case. This is implemented with help of
 * the absolute erase counter (@wl->abs_ec). When it reaches a certain value,
 * the physical eraseblocks are moved from the protection trees (@wl->prot.*)
 * to the @wl->used tree.
 *
 * Protected physical eraseblocks are searched by physical eraseblock number
 * (when they are put) and by the absolute erase counter (to check if it is
 * time to move them to the @wl->used tree). So there are actually 2 RB-trees
 * storing the protected physical eraseblocks: @wl->prot.pnum and
 * @wl->prot.aec. They are referred to as the "protection" trees. The
 * first one is indexed by the physical eraseblock number. The second one is
 * indexed by the absolute erase counter. Both trees store
 * &struct ubi_wl_prot_entry objects.
 *
 * Each physical eraseblock has 2 main states: free and used. The former state
 * corresponds to the @wl->free tree. The latter state is split up into
 * several sub-states:
 * o the WL movement is allowed (@wl->used tree);
 * o the WL movement is temporarily prohibited (@wl->prot.pnum and
 *   @wl->prot.aec trees);
 * o scrubbing is needed (@wl->scrub tree).
 *
 * Depending on the sub-state, wear-leveling entries of the used physical
 * eraseblocks may be kept in one of those trees.
 */
struct ubi_wl_prot_entry {
	struct rb_node rb_pnum;
	struct rb_node rb_aec;
	unsigned long long abs_ec;
	struct ubi_wl_entry *e;
};

/**
 * struct ubi_work - UBI work description data structure.
 * @list: a link in the list of pending works
 * @func: worker function
 * @priv: private data of the worker function
 *
 * @e: physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * The @func pointer points to the worker function. If the @cancel argument is
 * not zero, the worker has to free the resources and exit immediately. The
 * worker has to return zero in case of success and a negative error code in
 * case of failure.
 */
struct ubi_work {
	struct list_head list;
	int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
	/* The below fields are only relevant to erasure works */
	struct ubi_wl_entry *e;
	int torture;
};

#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
				     struct rb_root *root);
#else
#define paranoid_check_ec(ubi, pnum, ec) 0
#define paranoid_check_in_wl_tree(e, root)
#endif

/* Slab cache for wear-leveling entries */
static struct kmem_cache *wl_entries_slab;

/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node **p, *parent = NULL;

	p = &root->rb_node;
	while (*p) {
		struct ubi_wl_entry *e1;

		parent = *p;
		e1 = rb_entry(parent, struct ubi_wl_entry, rb);

		if (e->ec < e1->ec)
			p = &(*p)->rb_left;
		else if (e->ec > e1->ec)
			p = &(*p)->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
	}

	rb_link_node(&e->rb, parent, p);
	rb_insert_color(&e->rb, root);
}
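
/*
 * Illustrative ordering example: entries with (EC, pnum) pairs (2, 7), (2, 9)
 * and (5, 1) are kept in the order (2, 7) < (2, 9) < (5, 1) - the erase
 * counter is the primary key and the eraseblock number breaks ties, so
 * rb_first() always yields the least worn entry.
 */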

/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_work(struct ubi_device *ubi)
{
	int err;
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);

	if (list_empty(&ubi->works)) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	wrk = list_entry(ubi->works.next, struct ubi_work, list);
	list_del(&wrk->list);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Call the worker function. Do not touch the work structure
	 * after this call as it will have been freed or reused by that
	 * time by the worker function.
	 */
	err = wrk->func(ubi, wrk, 0);
	if (err)
		ubi_err("work failed with error code %d", err);

	spin_lock(&ubi->wl_lock);
	ubi->works_count -= 1;
	ubi_assert(ubi->works_count >= 0);
	spin_unlock(&ubi->wl_lock);
	return err;
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	spin_lock(&ubi->wl_lock);
	while (!ubi->free.rb_node) {
		spin_unlock(&ubi->wl_lock);

		dbg_wl("do one work synchronously");
		err = do_work(ubi);
		if (err)
			return err;

		spin_lock(&ubi->wl_lock);
	}
	spin_unlock(&ubi->wl_lock);

	return 0;
}

/**
 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node *p;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, rb);

		if (e->pnum == e1->pnum) {
			ubi_assert(e == e1);
			return 1;
		}

		if (e->ec < e1->ec)
			p = p->rb_left;
		else if (e->ec > e1->ec)
			p = p->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = p->rb_left;
			else
				p = p->rb_right;
		}
	}

	return 0;
}

/**
 * prot_tree_add - add physical eraseblock to protection trees.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 * @pe: protection entry object to use
 * @abs_ec: absolute erase counter value when this physical eraseblock has
 * to be removed from the protection trees.
 *
 * @wl->lock has to be locked.
 */
static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  struct ubi_wl_prot_entry *pe, int abs_ec)
{
	struct rb_node **p, *parent = NULL;
	struct ubi_wl_prot_entry *pe1;

	pe->e = e;
	pe->abs_ec = ubi->abs_ec + abs_ec;

	p = &ubi->prot.pnum.rb_node;
	while (*p) {
		parent = *p;
		pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_pnum);

		if (e->pnum < pe1->e->pnum)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&pe->rb_pnum, parent, p);
	rb_insert_color(&pe->rb_pnum, &ubi->prot.pnum);

	p = &ubi->prot.aec.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_aec);

		if (pe->abs_ec < pe1->abs_ec)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&pe->rb_aec, parent, p);
	rb_insert_color(&pe->rb_aec, &ubi->prot.aec);
}

/**
 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
 * @root: the RB-tree to look in
 * @max: highest possible erase counter
 *
 * This function looks for a wear leveling entry with erase counter closest to
 * @max and less than @max.
 */
static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
{
	struct rb_node *p;
	struct ubi_wl_entry *e;

	e = rb_entry(rb_first(root), struct ubi_wl_entry, rb);
	max += e->ec;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, rb);
		if (e1->ec >= max)
			p = p->rb_left;
		else {
			p = p->rb_right;
			e = e1;
		}
	}

	return e;
}
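
/*
 * Illustrative example: note that @max is taken relative to the lowest erase
 * counter in the tree. If the tree holds entries with ECs 10, 50, 100 and
 * 900, and @max is 100, the search limit becomes 10 + 100 = 110 and the
 * entry with EC 100 is returned - the largest EC below that limit.
 */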

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 * @dtype: type of data which will be stored in this physical eraseblock
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure. Might sleep.
 */
int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
{
	int err, protect, medium_ec;
	struct ubi_wl_entry *e, *first, *last;
	struct ubi_wl_prot_entry *pe;

	ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
		   dtype == UBI_UNKNOWN);

	pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
	if (!pe)
		return -ENOMEM;

retry:
	spin_lock(&ubi->wl_lock);
	if (!ubi->free.rb_node) {
		if (ubi->works_count == 0) {
			ubi_assert(list_empty(&ubi->works));
			ubi_err("no free eraseblocks");
			spin_unlock(&ubi->wl_lock);
			kfree(pe);
			return -ENOSPC;
		}
		spin_unlock(&ubi->wl_lock);

		err = produce_free_peb(ubi);
		if (err < 0) {
			kfree(pe);
			return err;
		}
		goto retry;
	}

	switch (dtype) {
		case UBI_LONGTERM:
			/*
			 * For long term data we pick a physical eraseblock
			 * with high erase counter. But the highest erase
			 * counter we can pick is bounded by the lowest erase
			 * counter plus %WL_FREE_MAX_DIFF.
			 */
			e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
			protect = LT_PROTECTION;
			break;
		case UBI_UNKNOWN:
			/*
			 * For unknown data we pick a physical eraseblock with
			 * medium erase counter. But we by no means can pick a
			 * physical eraseblock with erase counter greater than
			 * or equal to the lowest erase counter plus
			 * %WL_FREE_MAX_DIFF.
			 */
			first = rb_entry(rb_first(&ubi->free),
					 struct ubi_wl_entry, rb);
			last = rb_entry(rb_last(&ubi->free),
					struct ubi_wl_entry, rb);

			if (last->ec - first->ec < WL_FREE_MAX_DIFF)
				e = rb_entry(ubi->free.rb_node,
						struct ubi_wl_entry, rb);
			else {
				medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
				e = find_wl_entry(&ubi->free, medium_ec);
			}
			protect = U_PROTECTION;
			break;
		case UBI_SHORTTERM:
			/*
			 * For short term data we pick a physical eraseblock
			 * with the lowest erase counter as we expect it will
			 * be erased soon.
			 */
			e = rb_entry(rb_first(&ubi->free),
				     struct ubi_wl_entry, rb);
			protect = ST_PROTECTION;
			break;
		default:
			protect = 0;
			e = NULL;
			BUG();
	}

	/*
	 * Move the physical eraseblock to the protection trees where it will
	 * be protected from being moved for some time.
	 */
	paranoid_check_in_wl_tree(e, &ubi->free);
	rb_erase(&e->rb, &ubi->free);
	prot_tree_add(ubi, e, pe, protect);

	dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
	spin_unlock(&ubi->wl_lock);

	return e->pnum;
}

/**
 * prot_tree_del - remove a physical eraseblock from the protection trees.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 */
static void prot_tree_del(struct ubi_device *ubi, int pnum)
{
	struct rb_node *p;
	struct ubi_wl_prot_entry *pe = NULL;

	p = ubi->prot.pnum.rb_node;
	while (p) {

		pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);

		if (pnum == pe->e->pnum)
			break;

		if (pnum < pe->e->pnum)
			p = p->rb_left;
		else
			p = p->rb_right;
	}

	ubi_assert(pe->e->pnum == pnum);
	rb_erase(&pe->rb_aec, &ubi->prot.aec);
	rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
	kfree(pe);
}

/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture)
{
	int err;
	struct ubi_ec_hdr *ec_hdr;
	unsigned long long ec = e->ec;

	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

	err = paranoid_check_ec(ubi, e->pnum, e->ec);
	if (err > 0)
		return -EINVAL;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_sync_erase(ubi, e->pnum, torture);
	if (err < 0)
		goto out_free;

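	/*
	 * On success, 'ubi_io_sync_erase()' is expected to return the number
	 * of erase operations it performed (more than one if the PEB was
	 * tortured), so all of them are accounted in the erase counter.
	 */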
	ec += err;
	if (ec > UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */
		ubi_err("erase counter overflow at PEB %d, EC %llu",
			e->pnum, ec);
		err = -EINVAL;
		goto out_free;
	}

	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

	ec_hdr->ec = cpu_to_be64(ec);

	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
	if (err)
		goto out_free;

	e->ec = ec;
	spin_lock(&ubi->wl_lock);
	if (e->ec > ubi->max_ec)
		ubi->max_ec = e->ec;
	spin_unlock(&ubi->wl_lock);

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * check_protection_over - check if it is time to stop protecting some
 * physical eraseblocks.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation, when the absolute erase
 * counter is incremented, to check if some physical eraseblocks should not be
 * protected any longer. These physical eraseblocks are moved from the
 * protection trees to the used tree.
 */
static void check_protection_over(struct ubi_device *ubi)
{
	struct ubi_wl_prot_entry *pe;

	/*
	 * There may be several protected physical eraseblocks to remove,
	 * process them all.
	 */
	while (1) {
		spin_lock(&ubi->wl_lock);
		if (!ubi->prot.aec.rb_node) {
			spin_unlock(&ubi->wl_lock);
			break;
		}

		pe = rb_entry(rb_first(&ubi->prot.aec),
			      struct ubi_wl_prot_entry, rb_aec);

		if (pe->abs_ec > ubi->abs_ec) {
			spin_unlock(&ubi->wl_lock);
			break;
		}

		dbg_wl("PEB %d protection over, abs_ec %llu, PEB abs_ec %llu",
		       pe->e->pnum, ubi->abs_ec, pe->abs_ec);
		rb_erase(&pe->rb_aec, &ubi->prot.aec);
		rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
		wl_tree_add(pe->e, &ubi->used);
		spin_unlock(&ubi->wl_lock);

		kfree(pe);
		cond_resched();
	}
}

/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function enqueues a work defined by @wrk to the tail of the pending
 * works list.
 */
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	spin_lock(&ubi->wl_lock);
	list_add_tail(&wrk->list, &ubi->works);
	ubi_assert(ubi->works_count >= 0);
	ubi->works_count += 1;
	if (ubi->thread_enabled)
		wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel);

/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and %-ENOMEM in case of
 * failure.
 */
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  int torture)
{
	struct ubi_work *wl_wrk;

	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
	       e->pnum, e->ec, torture);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;

	wl_wrk->func = &erase_worker;
	wl_wrk->e = e;
	wl_wrk->torture = torture;

	schedule_ubi_work(ubi, wl_wrk);
	return 0;
}

/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function copies a more worn out physical eraseblock to a less worn out
 * one. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
				int cancel)
{
	int err, put = 0;
	struct ubi_wl_entry *e1, *e2;
	struct ubi_vid_hdr *vid_hdr;

	kfree(wrk);

	if (cancel)
		return 0;

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	spin_lock(&ubi->wl_lock);

	/*
	 * Only one WL worker at a time is supported in this implementation,
	 * so make sure a PEB is not being moved already.
	 */
	if (ubi->move_to || !ubi->free.rb_node ||
	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
		/*
		 * If a LEB is already being moved, cancel.
		 *
		 * No free physical eraseblocks? Well, we cancel wear-leveling
		 * then. It will be triggered again when a free physical
		 * eraseblock appears.
		 *
		 * No used physical eraseblocks? They must be temporarily
		 * protected from being moved. They will be moved to the
		 * @ubi->used tree later and the wear-leveling will be
		 * triggered again.
		 */
		dbg_wl("cancel WL, a list is empty: free %d, used %d",
		       !ubi->free.rb_node, !ubi->used.rb_node);
		ubi->wl_scheduled = 0;
		spin_unlock(&ubi->wl_lock);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return 0;
	}

	if (!ubi->scrub.rb_node) {
		/*
		 * Now pick the least worn-out used physical eraseblock and a
		 * highly worn-out free physical eraseblock. If the erase
		 * counters differ enough, start wear-leveling.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
			dbg_wl("no WL needed: min used EC %d, max free EC %d",
			       e1->ec, e2->ec);
			ubi->wl_scheduled = 0;
			spin_unlock(&ubi->wl_lock);
			ubi_free_vid_hdr(ubi, vid_hdr);
			return 0;
		}
		paranoid_check_in_wl_tree(e1, &ubi->used);
		rb_erase(&e1->rb, &ubi->used);
		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
		       e1->pnum, e1->ec, e2->pnum, e2->ec);
	} else {
		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
		paranoid_check_in_wl_tree(e1, &ubi->scrub);
		rb_erase(&e1->rb, &ubi->scrub);
		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
	}

	paranoid_check_in_wl_tree(e2, &ubi->free);
	rb_erase(&e2->rb, &ubi->free);
	ubi_assert(!ubi->move_from && !ubi->move_to);
	ubi_assert(!ubi->move_to_put && !ubi->move_from_put);
	ubi->move_from = e1;
	ubi->move_to = e2;
	spin_unlock(&ubi->wl_lock);

	/*
	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
	 * We so far do not know which logical eraseblock our physical
	 * eraseblock (@e1) belongs to. We have to read the volume identifier
	 * header first.
	 */

	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		if (err == UBI_IO_PEB_FREE) {
			/*
			 * We are trying to move a PEB without a VID header.
			 * UBI always writes VID headers shortly after the PEB
			 * was given out, so here we have a situation when the
			 * user has not yet had a chance to write one because
			 * it was preempted. Just re-schedule the work, so that
			 * next time it will likely have the VID header in
			 * place.
			 */
			dbg_wl("PEB %d has no VID header", e1->pnum);
			err = 0;
		} else {
			ubi_err("error %d while reading VID header from PEB %d",
				err, e1->pnum);
			if (err > 0)
				err = -EIO;
		}
		goto error;
	}

	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
	if (err) {
		if (err == UBI_IO_BITFLIPS)
			err = 0;
		goto error;
	}

	ubi_free_vid_hdr(ubi, vid_hdr);
	spin_lock(&ubi->wl_lock);
	if (!ubi->move_to_put)
		wl_tree_add(e2, &ubi->used);
	else
		put = 1;
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_from_put = ubi->move_to_put = 0;
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	if (put) {
		/*
		 * Well, the target PEB was put meanwhile, schedule it for
		 * erasure.
		 */
		dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
		err = schedule_erase(ubi, e2, 0);
		if (err) {
			kmem_cache_free(wl_entries_slab, e2);
			ubi_ro_mode(ubi);
		}
	}

	err = schedule_erase(ubi, e1, 0);
	if (err) {
		kmem_cache_free(wl_entries_slab, e1);
		ubi_ro_mode(ubi);
	}

	dbg_wl("done");
	return err;

	/*
	 * Some error occurred. @e1 was not changed, so return it back. @e2
	 * might be changed, schedule it for erasure.
	 */
error:
	if (err)
		dbg_wl("error %d occurred, cancel operation", err);
	ubi_assert(err <= 0);

	ubi_free_vid_hdr(ubi, vid_hdr);
	spin_lock(&ubi->wl_lock);
	ubi->wl_scheduled = 0;
	if (ubi->move_from_put)
		put = 1;
	else
		wl_tree_add(e1, &ubi->used);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_from_put = ubi->move_to_put = 0;
	spin_unlock(&ubi->wl_lock);

	if (put) {
		/*
		 * Well, the source PEB was put meanwhile, schedule it for
		 * erasure.
		 */
		dbg_wl("PEB %d was put meanwhile, erase", e1->pnum);
		err = schedule_erase(ubi, e1, 0);
		if (err) {
			kmem_cache_free(wl_entries_slab, e1);
			ubi_ro_mode(ubi);
		}
	}

	err = schedule_erase(ubi, e2, 0);
	if (err) {
		kmem_cache_free(wl_entries_slab, e2);
		ubi_ro_mode(ubi);
	}

	yield();
	return err;
}

/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
static int ensure_wear_leveling(struct ubi_device *ubi)
{
	int err = 0;
	struct ubi_wl_entry *e1;
	struct ubi_wl_entry *e2;
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled)
		/* Wear-leveling is already in the work queue */
		goto out_unlock;

	/*
	 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
	 * WL worker has to be scheduled anyway.
	 */
	if (!ubi->scrub.rb_node) {
		if (!ubi->used.rb_node || !ubi->free.rb_node)
			/* No physical eraseblocks - no deal */
			goto out_unlock;

		/*
		 * We schedule wear-leveling only if the difference between the
		 * lowest erase counter of used physical eraseblocks and a high
		 * erase counter of free physical eraseblocks is greater than
		 * %UBI_WL_THRESHOLD.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
			goto out_unlock;
		dbg_wl("schedule wear-leveling");
	} else
		dbg_wl("schedule scrubbing");

	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		err = -ENOMEM;
		goto out_cancel;
	}

	wrk->func = &wear_leveling_worker;
	schedule_ubi_work(ubi, wrk);
	return err;

out_cancel:
	spin_lock(&ubi->wl_lock);
	ubi->wl_scheduled = 0;
out_unlock:
	spin_unlock(&ubi->wl_lock);
	return err;
}

/**
 * erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function erases a physical eraseblock and performs torture testing if
 * needed. It also takes care of marking the physical eraseblock bad if
 * needed. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel)
{
	struct ubi_wl_entry *e = wl_wrk->e;
	int pnum = e->pnum, err, need;

	if (cancel) {
		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
		kfree(wl_wrk);
		kmem_cache_free(wl_entries_slab, e);
		return 0;
	}

	dbg_wl("erase PEB %d EC %d", pnum, e->ec);

	err = sync_erase(ubi, e, wl_wrk->torture);
	if (!err) {
		/* Fine, we've erased it successfully */
		kfree(wl_wrk);

		spin_lock(&ubi->wl_lock);
		ubi->abs_ec += 1;
		wl_tree_add(e, &ubi->free);
		spin_unlock(&ubi->wl_lock);

		/*
		 * One more erase operation has happened, take care of the
		 * protected physical eraseblocks.
		 */
		check_protection_over(ubi);

		/* And take care of wear-leveling */
		err = ensure_wear_leveling(ubi);
		return err;
	}

	ubi_err("failed to erase PEB %d, error %d", pnum, err);
	kfree(wl_wrk);
	kmem_cache_free(wl_entries_slab, e);

	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
	    err == -EBUSY) {
		int err1;

		/* Re-schedule the LEB for erasure */
		err1 = schedule_erase(ubi, e, 0);
		if (err1) {
			err = err1;
			goto out_ro;
		}
		return err;
	} else if (err != -EIO) {
		/*
		 * If this is not %-EIO, we have no idea what to do. Scheduling
		 * this physical eraseblock for erasure again would cause
		 * errors again and again. Well, let's switch to RO mode.
		 */
		goto out_ro;
	}

	/* It is %-EIO, the PEB went bad */

	if (!ubi->bad_allowed) {
		ubi_err("bad physical eraseblock %d detected", pnum);
		goto out_ro;
	}

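	/*
	 * Illustrative arithmetic: with @beb_rsvd_level = 20 and
	 * @beb_rsvd_pebs = 20, @need below is 1, i.e. one available PEB is
	 * moved to the reserve to compensate for the reserved PEB which is
	 * about to be consumed for the newly appeared bad one.
	 */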
	spin_lock(&ubi->volumes_lock);
	need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
	if (need > 0) {
		need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
		ubi->avail_pebs -= need;
		ubi->rsvd_pebs += need;
		ubi->beb_rsvd_pebs += need;
		if (need > 0)
			ubi_msg("reserve more %d PEBs", need);
	}

	if (ubi->beb_rsvd_pebs == 0) {
		spin_unlock(&ubi->volumes_lock);
		ubi_err("no reserved physical eraseblocks");
		goto out_ro;
	}

	spin_unlock(&ubi->volumes_lock);
	ubi_msg("mark PEB %d as bad", pnum);

	err = ubi_io_mark_bad(ubi, pnum);
	if (err)
		goto out_ro;

	spin_lock(&ubi->volumes_lock);
	ubi->beb_rsvd_pebs -= 1;
	ubi->bad_peb_count += 1;
	ubi->good_peb_count -= 1;
	ubi_calculate_reserved(ubi);
	if (ubi->beb_rsvd_pebs == 0)
		ubi_warn("last PEB from the reserved pool was used");
	spin_unlock(&ubi->volumes_lock);

	return err;

out_ro:
	ubi_ro_mode(ubi);
	return err;
}

/**
 * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling
 * unit.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock to return
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function is called to return physical eraseblock @pnum to the pool of
 * free physical eraseblocks. The @torture flag has to be set if an I/O error
 * occurred on this @pnum and it has to be tested. This function returns zero
 * in case of success and a negative error code in case of failure.
 */
int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
{
	int err;
	struct ubi_wl_entry *e;

	dbg_wl("PEB %d", pnum);
	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	spin_lock(&ubi->wl_lock);

	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * to be moved. It will be scheduled for erasure in the
		 * wear-leveling worker.
		 */
		dbg_wl("PEB %d is being moved", pnum);
		ubi_assert(!ubi->move_from_put);
		ubi->move_from_put = 1;
		spin_unlock(&ubi->wl_lock);
		return 0;
	} else if (e == ubi->move_to) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * as the target the data is moved to. It may happen if the EBA
		 * unit has already re-mapped the LEB but the WL unit has not
		 * yet put the PEB to the "used" tree.
		 */
		dbg_wl("PEB %d is the target of data moving", pnum);
		ubi_assert(!ubi->move_to_put);
		ubi->move_to_put = 1;
		spin_unlock(&ubi->wl_lock);
		return 0;
	} else {
		if (in_wl_tree(e, &ubi->used)) {
			paranoid_check_in_wl_tree(e, &ubi->used);
			rb_erase(&e->rb, &ubi->used);
		} else if (in_wl_tree(e, &ubi->scrub)) {
			paranoid_check_in_wl_tree(e, &ubi->scrub);
			rb_erase(&e->rb, &ubi->scrub);
		} else
			prot_tree_del(ubi, e->pnum);
	}
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e, torture);
	if (err) {
		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->used);
		spin_unlock(&ubi->wl_lock);
	}

	return err;
}

/**
 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 *
 * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
 * needs scrubbing. This function schedules a physical eraseblock for
 * scrubbing, which is done in the background. This function returns zero in
 * case of success and a negative error code in case of failure.
 */
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	ubi_msg("schedule PEB %d for scrubbing", pnum);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	if (e == ubi->move_to) {
		/*
		 * This physical eraseblock was used to move data to. The data
		 * was moved but the PEB was not yet inserted to the proper
		 * tree. We should just wait a little and let the WL worker
		 * proceed.
		 */
		spin_unlock(&ubi->wl_lock);
		dbg_wl("the PEB %d is not in proper tree, retry", pnum);
		yield();
		goto retry;
	}

	if (in_wl_tree(e, &ubi->used)) {
		paranoid_check_in_wl_tree(e, &ubi->used);
		rb_erase(&e->rb, &ubi->used);
	} else
		prot_tree_del(ubi, pnum);

	wl_tree_add(e, &ubi->scrub);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Technically scrubbing is the same as wear-leveling, so it is done
	 * by the WL worker.
	 */
	return ensure_wear_leveling(ubi);
}
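
/*
 * A minimal usage sketch (illustrative, assuming the I/O unit interfaces):
 * a reader which sees a correctable bit-flip keeps the data it read and asks
 * for scrubbing, so the PEB is copied to a fresh one in the background:
 *
 *	err = ubi_io_read_data(ubi, buf, pnum, offset, len);
 *	if (err == UBI_IO_BITFLIPS)
 *		err = ubi_wl_scrub_peb(ubi, pnum);
 */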

/**
 * ubi_wl_flush - flush all pending works.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubi_wl_flush(struct ubi_device *ubi)
{
	int err, pending_count;

	pending_count = ubi->works_count;

	dbg_wl("flush (%d pending works)", pending_count);

	/*
	 * Erase while the pending works queue is not empty, but not more than
	 * the number of currently pending works.
	 */
	while (pending_count-- > 0) {
		err = do_work(ubi);
		if (err)
			return err;
	}

	return 0;
}

/**
 * tree_destroy - destroy an RB-tree.
 * @root: the root of the tree to destroy
 */
static void tree_destroy(struct rb_root *root)
{
	struct rb_node *rb;
	struct ubi_wl_entry *e;

	rb = root->rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			e = rb_entry(rb, struct ubi_wl_entry, rb);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &e->rb)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			kmem_cache_free(wl_entries_slab, e);
		}
	}
}

/**
 * ubi_thread - UBI background thread.
 * @u: the UBI device description object pointer
 */
static int ubi_thread(void *u)
{
	int failures = 0;
	struct ubi_device *ubi = u;

	ubi_msg("background thread \"%s\" started, PID %d",
		ubi->bgt_name, task_pid_nr(current));

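	/* Opt in to the freezer so this thread does not block system suspend */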
	set_freezable();
	for (;;) {
		int err;

		if (kthread_should_stop())
			goto out;

		if (try_to_freeze())
			continue;

		spin_lock(&ubi->wl_lock);
		if (list_empty(&ubi->works) || ubi->ro_mode ||
		    !ubi->thread_enabled) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock(&ubi->wl_lock);
			schedule();
			continue;
		}
		spin_unlock(&ubi->wl_lock);

		err = do_work(ubi);
		if (err) {
			ubi_err("%s: work failed with error code %d",
				ubi->bgt_name, err);
			if (failures++ > WL_MAX_FAILURES) {
				/*
				 * Too many failures, disable the thread and
				 * switch to read-only mode.
				 */
				ubi_msg("%s: %d consecutive failures",
					ubi->bgt_name, WL_MAX_FAILURES);
				ubi_ro_mode(ubi);
				break;
			}
		} else
			failures = 0;

		cond_resched();
	}

out:
	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
	return 0;
}

/**
 * cancel_pending - cancel all pending works.
 * @ubi: UBI device description object
 */
static void cancel_pending(struct ubi_device *ubi)
{
	while (!list_empty(&ubi->works)) {
		struct ubi_work *wrk;

		wrk = list_entry(ubi->works.next, struct ubi_work, list);
		list_del(&wrk->list);
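		/* Invoke the worker with the cancel flag set, so it only
		 * releases its resources instead of doing the work */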
		wrk->func(ubi, wrk, 1);
		ubi->works_count -= 1;
		ubi_assert(ubi->works_count >= 0);
	}
}

/**
 * ubi_wl_init_scan - initialize the wear-leveling unit using scanning
 * information.
 * @ubi: UBI device description object
 * @si: scanning information
 *
 * This function returns zero in case of success, and a negative error code in
 * case of failure.
 */
int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
{
	int err;
	struct rb_node *rb1, *rb2;
	struct ubi_scan_volume *sv;
	struct ubi_scan_leb *seb, *tmp;
	struct ubi_wl_entry *e;

	ubi->used = ubi->free = ubi->scrub = RB_ROOT;
	ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
	spin_lock_init(&ubi->wl_lock);
	ubi->max_ec = si->max_ec;
	INIT_LIST_HEAD(&ubi->works);

	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);

	ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
	if (IS_ERR(ubi->bgt_thread)) {
		err = PTR_ERR(ubi->bgt_thread);
		ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name, err);
		return err;
	}

	if (ubi_devices_cnt == 0) {
		wl_entries_slab = kmem_cache_create("ubi_wl_entry_slab",
						    sizeof(struct ubi_wl_entry),
						    0, 0, NULL);
		if (!wl_entries_slab)
			return -ENOMEM;
	}

	err = -ENOMEM;
	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
	if (!ubi->lookuptbl)
		goto out_free;

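	/* PEBs which the scan found needing erasure are scheduled for it */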
	list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
		cond_resched();

		e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, 0)) {
			kmem_cache_free(wl_entries_slab, e);
			goto out_free;
		}
	}

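	/* PEBs the scan found free go straight to the free tree */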
	list_for_each_entry(seb, &si->free, u.list) {
		cond_resched();

		e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi_assert(e->ec >= 0);
		wl_tree_add(e, &ubi->free);
		ubi->lookuptbl[e->pnum] = e;
	}

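	/* Corrupted PEBs are scheduled for erasure as well */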
	list_for_each_entry(seb, &si->corr, u.list) {
		cond_resched();

		e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, 0)) {
			kmem_cache_free(wl_entries_slab, e);
			goto out_free;
		}
	}

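	/* PEBs which hold volume data go to the used or the scrub tree */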
	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
			cond_resched();

			e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
			if (!e)
				goto out_free;

			e->pnum = seb->pnum;
			e->ec = seb->ec;
			ubi->lookuptbl[e->pnum] = e;
			if (!seb->scrub) {
				dbg_wl("add PEB %d EC %d to the used tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->used);
			} else {
				dbg_wl("add PEB %d EC %d to the scrub tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->scrub);
			}
		}
	}

	if (ubi->avail_pebs < WL_RESERVED_PEBS) {
		ubi_err("not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, WL_RESERVED_PEBS);
		err = -ENOSPC;
		goto out_free;
	}
	ubi->avail_pebs -= WL_RESERVED_PEBS;
	ubi->rsvd_pebs += WL_RESERVED_PEBS;

	/* Schedule wear-leveling if needed */
	err = ensure_wear_leveling(ubi);
	if (err)
		goto out_free;

	return 0;

out_free:
	cancel_pending(ubi);
	tree_destroy(&ubi->used);
	tree_destroy(&ubi->free);
	tree_destroy(&ubi->scrub);
	kfree(ubi->lookuptbl);
	if (ubi_devices_cnt == 0)
		kmem_cache_destroy(wl_entries_slab);
	return err;
}

/**
 * protection_trees_destroy - destroy the protection RB-trees.
 * @ubi: UBI device description object
 */
static void protection_trees_destroy(struct ubi_device *ubi)
{
	struct rb_node *rb;
	struct ubi_wl_prot_entry *pe;

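	/*
	 * Each protection entry sits in both the @prot.pnum and @prot.aec
	 * trees, so walking the @prot.aec tree alone frees every entry
	 * exactly once.
	 */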
	rb = ubi->prot.aec.rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			pe = rb_entry(rb, struct ubi_wl_prot_entry, rb_aec);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &pe->rb_aec)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			kmem_cache_free(wl_entries_slab, pe->e);
			kfree(pe);
		}
	}
}

/**
 * ubi_wl_close - close the wear-leveling unit.
 * @ubi: UBI device description object
 */
void ubi_wl_close(struct ubi_device *ubi)
{
	dbg_wl("disable \"%s\"", ubi->bgt_name);
	if (ubi->bgt_thread)
		kthread_stop(ubi->bgt_thread);

	dbg_wl("close the UBI wear-leveling unit");

	cancel_pending(ubi);
	protection_trees_destroy(ubi);
	tree_destroy(&ubi->used);
	tree_destroy(&ubi->free);
	tree_destroy(&ubi->scrub);
	kfree(ubi->lookuptbl);
	if (ubi_devices_cnt == 1)
		kmem_cache_destroy(wl_entries_slab);
}

#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID

/**
 * paranoid_check_ec - make sure that the erase counter of a PEB is correct.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @ec: the erase counter to check
 *
 * This function returns zero if the erase counter of physical eraseblock
 * @pnum is equivalent to @ec, %1 if not, and a negative error code if an
 * error occurred.
 */
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
	int err;
	long long read_ec;
	struct ubi_ec_hdr *ec_hdr;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		/* The header does not have to exist */
		err = 0;
		goto out_free;
	}

	read_ec = be64_to_cpu(ec_hdr->ec);
	if (ec != read_ec) {
		ubi_err("paranoid check failed for PEB %d", pnum);
		ubi_err("read EC is %lld, should be %d", read_ec, ec);
		ubi_dbg_dump_stack();
		err = 1;
	} else
		err = 0;

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * paranoid_check_in_wl_tree - make sure that a wear-leveling entry is present
 * in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns zero if @e is in the @root RB-tree and %1 if it
 * is not.
 */
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
				     struct rb_root *root)
{
	if (in_wl_tree(e, root))
		return 0;

	ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p",
		e->pnum, e->ec, root);
	ubi_dbg_dump_stack();
	return 1;
}

#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */