/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
 */

/*
 * UBI wear-leveling sub-system.
 *
 * This sub-system is responsible for wear-leveling. It works in terms of
 * physical eraseblocks and erase counters and knows nothing about logical
 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
 * eraseblocks are of two types - used and free. Used physical eraseblocks are
 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
 * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
 *
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
 * counter header. The rest of the physical eraseblock contains only %0xFF
 * bytes.
 *
 * When physical eraseblocks are returned to the WL sub-system by means of the
 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
 * done asynchronously in the context of the per-UBI device background thread,
 * which is also managed by the WL sub-system.
 *
 * The wear-leveling is ensured by means of moving the contents of used
 * physical eraseblocks with low erase counter to free physical eraseblocks
 * with high erase counter.
 *
 * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick
 * an "optimal" physical eraseblock. For example, when it is known that the
 * physical eraseblock will be "put" soon because it contains short-term data,
 * the WL sub-system may pick a free physical eraseblock with low erase
 * counter, and so forth.
 *
 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
 * bad.
 *
 * This sub-system is also responsible for scrubbing. If a bit-flip is detected
 * in a physical eraseblock, it has to be moved. Technically this is the same
 * as moving it for wear-leveling reasons.
 *
 * As was said, for the UBI sub-system all physical eraseblocks are either
 * "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree, while
 * used eraseblocks are kept in a set of different RB-trees: @wl->used,
 * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub.
 *
 * Note, in this implementation, we keep a small in-RAM object for each
 * physical eraseblock. This is surely not a scalable solution. But it appears
 * to be good enough for moderately large flashes and it is simple. In the
 * future, one may re-work this sub-system and make it more scalable.
 *
 * At the moment this sub-system does not utilize the sequence number, which
 * was introduced relatively recently. But it would be wise to do this because
 * the sequence number of a logical eraseblock characterizes how old it is. For
 * example, when we move a PEB with low erase counter, and we need to pick the
 * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
 * pick a target PEB with an average EC if our PEB is not very "old". This is
 * room for future re-works of the WL sub-system.
 *
 * Note: the stuff with protection trees looks too complex and is difficult to
 * understand. Should be fixed.
 */

#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include "ubi.h"

/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * For how many erase cycles short term, unknown, and long term physical
 * eraseblocks are protected.
 */
#define ST_PROTECTION 16
#define U_PROTECTION 10
#define LT_PROTECTION 4

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL sub-system starts moving data from used physical
 * eraseblocks with low erase counter to free physical eraseblocks with high
 * erase counter.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is moved, the WL sub-system has to pick the target
 * physical eraseblock to move to. The simplest way would be just to pick the
 * one with the highest erase counter. But in certain workloads this could lead
 * to an unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
 * situation when the picked physical eraseblock is constantly erased after the
 * data is written to it. So, we have a constant which limits the highest erase
 * counter of the free physical eraseblock to pick. Namely, the WL sub-system
 * does not pick eraseblocks with erase counter greater than the lowest erase
 * counter plus %WL_FREE_MAX_DIFF.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
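
/*
 * For illustration, assuming the Kconfig default of 4096 for
 * %CONFIG_MTD_UBI_WL_THRESHOLD: %WL_FREE_MAX_DIFF is 8192, so if the least
 * worn free PEB has EC 100, 'find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF)'
 * never returns a free PEB with EC >= 8292.
 */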

/*
 * Maximum number of consecutive background thread failures which is enough to
 * switch to read-only mode.
 */
#define WL_MAX_FAILURES 32

/**
 * struct ubi_wl_prot_entry - PEB protection entry.
 * @rb_pnum: link in the @wl->prot.pnum RB-tree
 * @rb_aec: link in the @wl->prot.aec RB-tree
 * @abs_ec: the absolute erase counter value when the protection ends
 * @e: the wear-leveling entry of the physical eraseblock under protection
 *
 * When the WL sub-system returns a physical eraseblock, the physical
 * eraseblock is protected from being moved for some "time". For this reason,
 * the physical eraseblock is not directly moved from the @wl->free tree to the
 * @wl->used tree. There is one more tree in between where this physical
 * eraseblock is temporarily stored (@wl->prot).
 *
 * All this protection stuff is needed because:
 * o we don't want to move physical eraseblocks just after we have given them
 *   to the user; instead, we first want to let users fill them up with data;
 *
 * o there is a chance that the user will put the physical eraseblock very
 *   soon, so it makes sense not to move it for some time, but wait; this is
 *   especially important in case of "short term" physical eraseblocks.
 *
 * Physical eraseblocks stay protected only for a limited time. But the "time"
 * is measured in erase cycles in this case. This is implemented with the help
 * of the absolute erase counter (@wl->abs_ec). When it reaches a certain
 * value, the physical eraseblocks are moved from the protection trees
 * (@wl->prot.*) to the @wl->used tree.
 *
 * Protected physical eraseblocks are searched by physical eraseblock number
 * (when they are put) and by the absolute erase counter (to check if it is
 * time to move them to the @wl->used tree). So there are actually 2 RB-trees
 * storing the protected physical eraseblocks: @wl->prot.pnum and
 * @wl->prot.aec. They are referred to as the "protection" trees. The
 * first one is indexed by the physical eraseblock number. The second one is
 * indexed by the absolute erase counter. Both trees store
 * &struct ubi_wl_prot_entry objects.
 *
 * Each physical eraseblock has 2 main states: free and used. The former state
 * corresponds to the @wl->free tree. The latter state is split up into several
 * sub-states:
 * o the WL movement is allowed (@wl->used tree);
 * o the WL movement is temporarily prohibited (@wl->prot.pnum and
 *   @wl->prot.aec trees);
 * o scrubbing is needed (@wl->scrub tree).
 *
 * Depending on the sub-state, wear-leveling entries of the used physical
 * eraseblocks may be kept in one of those trees.
 */
struct ubi_wl_prot_entry {
	struct rb_node rb_pnum;
	struct rb_node rb_aec;
	unsigned long long abs_ec;
	struct ubi_wl_entry *e;
};
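
/*
 * A worked example of the protection arithmetic (illustrative): if
 * @wl->abs_ec is 100 when a short-term PEB is handed out, 'prot_tree_add()'
 * stores abs_ec = 100 + %ST_PROTECTION = 116 in the protection entry, and
 * 'check_protection_over()' moves the PEB to @wl->used once the global
 * absolute erase counter reaches 116.
 */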

/**
 * struct ubi_work - UBI work description data structure.
 * @list: a link in the list of pending works
 * @func: worker function
 * @e: physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * The @func pointer points to the worker function. If the @cancel argument is
 * not zero, the worker has to free the resources and exit immediately. The
 * worker has to return zero in case of success and a negative error code in
 * case of failure.
 */
struct ubi_work {
	struct list_head list;
	int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
	/* The below fields are only relevant to erasure works */
	struct ubi_wl_entry *e;
	int torture;
};

#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
				     struct rb_root *root);
#else
#define paranoid_check_ec(ubi, pnum, ec) 0
#define paranoid_check_in_wl_tree(e, root)
#endif

/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node **p, *parent = NULL;

	p = &root->rb_node;
	while (*p) {
		struct ubi_wl_entry *e1;

		parent = *p;
		e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);

		if (e->ec < e1->ec)
			p = &(*p)->rb_left;
		else if (e->ec > e1->ec)
			p = &(*p)->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
	}

	rb_link_node(&e->u.rb, parent, p);
	rb_insert_color(&e->u.rb, root);
}
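
/*
 * Example of the resulting order (illustrative): entries with (EC, pnum)
 * pairs (10, 7), (12, 1) and (10, 3) end up ordered as
 * (10, 3) < (10, 7) < (12, 1), so 'rb_first()' always yields the least worn
 * entry, with the physical eraseblock number only breaking ties between
 * equal erase counters.
 */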

/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_work(struct ubi_device *ubi)
{
	int err;
	struct ubi_work *wrk;

	cond_resched();

	/*
	 * @ubi->work_sem is used to synchronize with the workers. Workers take
	 * it in read mode, so many of them may be doing works at a time. But
	 * the queue flush code has to be sure the whole queue of works is
	 * done, and it takes the semaphore in write mode.
	 */
	down_read(&ubi->work_sem);
	spin_lock(&ubi->wl_lock);
	if (list_empty(&ubi->works)) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->work_sem);
		return 0;
	}

	wrk = list_entry(ubi->works.next, struct ubi_work, list);
	list_del(&wrk->list);
	ubi->works_count -= 1;
	ubi_assert(ubi->works_count >= 0);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Call the worker function. Do not touch the work structure
	 * after this call as it will have been freed or reused by that
	 * time by the worker function.
	 */
	err = wrk->func(ubi, wrk, 0);
	if (err)
		ubi_err("work failed with error code %d", err);
	up_read(&ubi->work_sem);

	return err;
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	spin_lock(&ubi->wl_lock);
	while (!ubi->free.rb_node) {
		spin_unlock(&ubi->wl_lock);

		dbg_wl("do one work synchronously");
		err = do_work(ubi);
		if (err)
			return err;

		spin_lock(&ubi->wl_lock);
	}
	spin_unlock(&ubi->wl_lock);

	return 0;
}

/**
 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node *p;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);

		if (e->pnum == e1->pnum) {
			ubi_assert(e == e1);
			return 1;
		}

		if (e->ec < e1->ec)
			p = p->rb_left;
		else if (e->ec > e1->ec)
			p = p->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = p->rb_left;
			else
				p = p->rb_right;
		}
	}

	return 0;
}

/**
 * prot_tree_add - add physical eraseblock to protection trees.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 * @pe: protection entry object to use
 * @ec: for how many erase operations this PEB should be protected
 *
 * @wl->lock has to be locked.
 */
static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  struct ubi_wl_prot_entry *pe, int ec)
{
	struct rb_node **p, *parent = NULL;
	struct ubi_wl_prot_entry *pe1;

	pe->e = e;
	pe->abs_ec = ubi->abs_ec + ec;

	p = &ubi->prot.pnum.rb_node;
	while (*p) {
		parent = *p;
		pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_pnum);

		if (e->pnum < pe1->e->pnum)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&pe->rb_pnum, parent, p);
	rb_insert_color(&pe->rb_pnum, &ubi->prot.pnum);

	p = &ubi->prot.aec.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_aec);

		if (pe->abs_ec < pe1->abs_ec)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&pe->rb_aec, parent, p);
	rb_insert_color(&pe->rb_aec, &ubi->prot.aec);
}

/**
 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
 * @root: the RB-tree where to look for
 * @max: highest possible erase counter
 *
 * This function looks for a wear leveling entry with erase counter closest to
 * @max and less than @max.
 */
static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
{
	struct rb_node *p;
	struct ubi_wl_entry *e;

	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	max += e->ec;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
		if (e1->ec >= max)
			p = p->rb_left;
		else {
			p = p->rb_right;
			e = e1;
		}
	}

	return e;
}
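
/*
 * For example (illustrative): if @root holds entries with erase counters 100,
 * 150 and 400, then 'find_wl_entry(root, 200)' returns the EC 150 entry - the
 * highest erase counter below 100 + 200 = 300.
 */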

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 * @dtype: type of data which will be stored in this physical eraseblock
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure. Might sleep.
 */
int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
{
	int err, protect, medium_ec;
	struct ubi_wl_entry *e, *first, *last;
	struct ubi_wl_prot_entry *pe;

	ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
		   dtype == UBI_UNKNOWN);

	pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
	if (!pe)
		return -ENOMEM;

retry:
	spin_lock(&ubi->wl_lock);
	if (!ubi->free.rb_node) {
		if (ubi->works_count == 0) {
			ubi_assert(list_empty(&ubi->works));
			ubi_err("no free eraseblocks");
			spin_unlock(&ubi->wl_lock);
			kfree(pe);
			return -ENOSPC;
		}
		spin_unlock(&ubi->wl_lock);

		err = produce_free_peb(ubi);
		if (err < 0) {
			kfree(pe);
			return err;
		}
		goto retry;
	}

	switch (dtype) {
	case UBI_LONGTERM:
		/*
		 * For long term data we pick a physical eraseblock with high
		 * erase counter. But the highest erase counter we can pick is
		 * bounded by the lowest erase counter plus
		 * %WL_FREE_MAX_DIFF.
		 */
		e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
		protect = LT_PROTECTION;
		break;
	case UBI_UNKNOWN:
		/*
		 * For unknown data we pick a physical eraseblock with medium
		 * erase counter. But we by no means can pick a physical
		 * eraseblock with erase counter greater than or equivalent to
		 * the lowest erase counter plus %WL_FREE_MAX_DIFF.
		 */
		first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry,
				 u.rb);
		last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb);

		if (last->ec - first->ec < WL_FREE_MAX_DIFF)
			e = rb_entry(ubi->free.rb_node,
				     struct ubi_wl_entry, u.rb);
		else {
			medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
			e = find_wl_entry(&ubi->free, medium_ec);
		}
		protect = U_PROTECTION;
		break;
	case UBI_SHORTTERM:
		/*
		 * For short term data we pick a physical eraseblock with the
		 * lowest erase counter as we expect it will be erased soon.
		 */
		e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
		protect = ST_PROTECTION;
		break;
	default:
		protect = 0;
		e = NULL;
		BUG();
	}

	/*
	 * Move the physical eraseblock to the protection trees where it will
	 * be protected from being moved for some time.
	 */
	paranoid_check_in_wl_tree(e, &ubi->free);
	rb_erase(&e->u.rb, &ubi->free);
	prot_tree_add(ubi, e, pe, protect);

	dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
	spin_unlock(&ubi->wl_lock);

	return e->pnum;
}
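
/*
 * A minimal usage sketch (illustrative, the real callers live in the EBA
 * sub-system): the "get" and "put" entry points are typically paired like
 *
 *	pnum = ubi_wl_get_peb(ubi, UBI_SHORTTERM);
 *	if (pnum < 0)
 *		return pnum;
 *	... write a VID header and data to PEB pnum ...
 *	err = ubi_wl_put_peb(ubi, pnum, 0);
 *
 * with a non-zero @torture argument passed to 'ubi_wl_put_peb()' instead if
 * an I/O error was detected on the PEB.
 */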

/**
 * prot_tree_del - remove a physical eraseblock from the protection trees
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function removes PEB @pnum from the protection trees and returns zero
 * in case of success and %-ENODEV if the PEB was not found in the protection
 * trees.
 */
static int prot_tree_del(struct ubi_device *ubi, int pnum)
{
	struct rb_node *p;
	struct ubi_wl_prot_entry *pe = NULL;

	p = ubi->prot.pnum.rb_node;
	while (p) {

		pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);

		if (pnum == pe->e->pnum)
			goto found;

		if (pnum < pe->e->pnum)
			p = p->rb_left;
		else
			p = p->rb_right;
	}

	return -ENODEV;

found:
	ubi_assert(pe->e->pnum == pnum);
	rb_erase(&pe->rb_aec, &ubi->prot.aec);
	rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
	kfree(pe);
	return 0;
}

/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
		      int torture)
{
	int err;
	struct ubi_ec_hdr *ec_hdr;
	unsigned long long ec = e->ec;

	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

	err = paranoid_check_ec(ubi, e->pnum, e->ec);
	if (err > 0)
		return -EINVAL;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_sync_erase(ubi, e->pnum, torture);
	if (err < 0)
		goto out_free;

	ec += err;
	if (ec > UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */
		ubi_err("erase counter overflow at PEB %d, EC %llu",
			e->pnum, ec);
		err = -EINVAL;
		goto out_free;
	}

	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

	ec_hdr->ec = cpu_to_be64(ec);

	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
	if (err)
		goto out_free;

	e->ec = ec;
	spin_lock(&ubi->wl_lock);
	if (e->ec > ubi->max_ec)
		ubi->max_ec = e->ec;
	spin_unlock(&ubi->wl_lock);

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * check_protection_over - check if it is time to stop protecting some PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation, when the absolute erase
 * counter is incremented, to check if some physical eraseblocks no longer
 * have to be protected. These physical eraseblocks are moved from the
 * protection trees to the used tree.
 */
static void check_protection_over(struct ubi_device *ubi)
{
	struct ubi_wl_prot_entry *pe;

	/*
	 * There may be several protected physical eraseblocks to remove,
	 * process them all.
	 */
	while (1) {
		spin_lock(&ubi->wl_lock);
		if (!ubi->prot.aec.rb_node) {
			spin_unlock(&ubi->wl_lock);
			break;
		}

		pe = rb_entry(rb_first(&ubi->prot.aec),
			      struct ubi_wl_prot_entry, rb_aec);

		if (pe->abs_ec > ubi->abs_ec) {
			spin_unlock(&ubi->wl_lock);
			break;
		}

		dbg_wl("PEB %d protection over, abs_ec %llu, PEB abs_ec %llu",
		       pe->e->pnum, ubi->abs_ec, pe->abs_ec);
		rb_erase(&pe->rb_aec, &ubi->prot.aec);
		rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
		wl_tree_add(pe->e, &ubi->used);
		spin_unlock(&ubi->wl_lock);

		kfree(pe);
		cond_resched();
	}
}

/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function enqueues a work defined by @wrk to the tail of the pending
 * works list.
 */
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	spin_lock(&ubi->wl_lock);
	list_add_tail(&wrk->list, &ubi->works);
	ubi_assert(ubi->works_count >= 0);
	ubi->works_count += 1;
	if (ubi->thread_enabled)
		wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel);

/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and %-ENOMEM in case of
 * failure.
 */
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  int torture)
{
	struct ubi_work *wl_wrk;

	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
	       e->pnum, e->ec, torture);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;

	wl_wrk->func = &erase_worker;
	wl_wrk->e = e;
	wl_wrk->torture = torture;

	schedule_ubi_work(ubi, wl_wrk);
	return 0;
}

/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function copies a more worn out physical eraseblock to a less worn out
 * one. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
				int cancel)
{
	int err, scrubbing = 0, torture = 0;
	struct ubi_wl_prot_entry *uninitialized_var(pe);
	struct ubi_wl_entry *e1, *e2;
	struct ubi_vid_hdr *vid_hdr;

	kfree(wrk);
	if (cancel)
		return 0;

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	mutex_lock(&ubi->move_mutex);
	spin_lock(&ubi->wl_lock);
	ubi_assert(!ubi->move_from && !ubi->move_to);
	ubi_assert(!ubi->move_to_put);

	if (!ubi->free.rb_node ||
	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
		/*
		 * No free physical eraseblocks? Well, they must be waiting in
		 * the queue to be erased. Cancel movement - it will be
		 * triggered again when a free physical eraseblock appears.
		 *
		 * No used physical eraseblocks? They must be temporarily
		 * protected from being moved. They will be moved to the
		 * @ubi->used tree later and the wear-leveling will be
		 * triggered again.
		 */
		dbg_wl("cancel WL, a list is empty: free %d, used %d",
		       !ubi->free.rb_node, !ubi->used.rb_node);
		goto out_cancel;
	}

	if (!ubi->scrub.rb_node) {
		/*
		 * Now pick the least worn-out used physical eraseblock and a
		 * highly worn-out free physical eraseblock. If the erase
		 * counters differ much enough, start wear-leveling.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
			dbg_wl("no WL needed: min used EC %d, max free EC %d",
			       e1->ec, e2->ec);
			goto out_cancel;
		}
		paranoid_check_in_wl_tree(e1, &ubi->used);
		rb_erase(&e1->u.rb, &ubi->used);
		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
		       e1->pnum, e1->ec, e2->pnum, e2->ec);
	} else {
		/* Perform scrubbing */
		scrubbing = 1;
		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
		paranoid_check_in_wl_tree(e1, &ubi->scrub);
		rb_erase(&e1->u.rb, &ubi->scrub);
		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
	}

	paranoid_check_in_wl_tree(e2, &ubi->free);
	rb_erase(&e2->u.rb, &ubi->free);
	ubi->move_from = e1;
	ubi->move_to = e2;
	spin_unlock(&ubi->wl_lock);

	/*
	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
	 * We so far do not know which logical eraseblock our physical
	 * eraseblock (@e1) belongs to. We have to read the volume identifier
	 * header first.
	 *
	 * Note, we are protected from this PEB being unmapped and erased. The
	 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
	 * which is being moved was unmapped.
	 */

	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		if (err == UBI_IO_PEB_FREE) {
			/*
			 * We are trying to move PEB without a VID header. UBI
			 * always writes VID headers shortly after the PEB was
			 * given, so we have a situation when it did not have
			 * a chance to write it down because it was preempted.
			 * Just re-schedule the work, so that next time it will
			 * likely have the VID header in place.
			 */
			dbg_wl("PEB %d has no VID header", e1->pnum);
			goto out_not_moved;
		}

		ubi_err("error %d while reading VID header from PEB %d",
			err, e1->pnum);
		if (err > 0)
			err = -EIO;
		goto out_error;
	}

	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
	if (err) {
		if (err == -EAGAIN)
			goto out_not_moved;
		if (err < 0)
			goto out_error;
		if (err == 2) {
			/* Target PEB write error, torture it */
			torture = 1;
			goto out_not_moved;
		}

		/*
		 * The LEB has not been moved because the volume is being
		 * deleted or the PEB has been put meanwhile. We should prevent
		 * this PEB from being selected for wear-leveling movement
		 * again, so put it to the protection tree.
		 */

		dbg_wl("canceled moving PEB %d", e1->pnum);
		ubi_assert(err == 1);

		pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
		if (!pe) {
			err = -ENOMEM;
			goto out_error;
		}

		ubi_free_vid_hdr(ubi, vid_hdr);
		vid_hdr = NULL;

		spin_lock(&ubi->wl_lock);
		prot_tree_add(ubi, e1, pe, U_PROTECTION);
		ubi_assert(!ubi->move_to_put);
		ubi->move_from = ubi->move_to = NULL;
		ubi->wl_scheduled = 0;
		spin_unlock(&ubi->wl_lock);

		e1 = NULL;
		err = schedule_erase(ubi, e2, 0);
		if (err)
			goto out_error;
		mutex_unlock(&ubi->move_mutex);
		return 0;
	}

	/* The PEB has been successfully moved */
	ubi_free_vid_hdr(ubi, vid_hdr);
	vid_hdr = NULL;
	if (scrubbing)
		ubi_msg("scrubbed PEB %d, data moved to PEB %d",
			e1->pnum, e2->pnum);

	spin_lock(&ubi->wl_lock);
	if (!ubi->move_to_put) {
		wl_tree_add(e2, &ubi->used);
		e2 = NULL;
	}
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e1, 0);
	if (err) {
		e1 = NULL;
		goto out_error;
	}

	if (e2) {
		/*
		 * Well, the target PEB was put meanwhile, schedule it for
		 * erasure.
		 */
		dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
		err = schedule_erase(ubi, e2, 0);
		if (err)
			goto out_error;
	}

	dbg_wl("done");
	mutex_unlock(&ubi->move_mutex);
	return 0;

	/*
	 * For some reason the LEB was not moved, might be an error, might be
	 * something else. @e1 was not changed, so return it back. @e2 might
	 * have been changed, schedule it for erasure.
	 */
out_not_moved:
	dbg_wl("canceled moving PEB %d", e1->pnum);
	ubi_free_vid_hdr(ubi, vid_hdr);
	vid_hdr = NULL;
	spin_lock(&ubi->wl_lock);
	if (scrubbing)
		wl_tree_add(e1, &ubi->scrub);
	else
		wl_tree_add(e1, &ubi->used);
	ubi_assert(!ubi->move_to_put);
	ubi->move_from = ubi->move_to = NULL;
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	e1 = NULL;
	err = schedule_erase(ubi, e2, torture);
	if (err)
		goto out_error;

	mutex_unlock(&ubi->move_mutex);
	return 0;

out_error:
	/* @e1 and @e2 may have been set to %NULL above - do not dereference */
	ubi_err("error %d while moving PEB %d to PEB %d",
		err, e1 ? e1->pnum : -1, e2 ? e2->pnum : -1);

	ubi_free_vid_hdr(ubi, vid_hdr);
	spin_lock(&ubi->wl_lock);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	if (e1)
		kmem_cache_free(ubi_wl_entry_slab, e1);
	if (e2)
		kmem_cache_free(ubi_wl_entry_slab, e2);
	ubi_ro_mode(ubi);

	mutex_unlock(&ubi->move_mutex);
	return err;

out_cancel:
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
	mutex_unlock(&ubi->move_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;
}

/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
static int ensure_wear_leveling(struct ubi_device *ubi)
{
	int err = 0;
	struct ubi_wl_entry *e1;
	struct ubi_wl_entry *e2;
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled)
		/* Wear-leveling is already in the work queue */
		goto out_unlock;

	/*
	 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
	 * WL worker has to be scheduled anyway.
	 */
	if (!ubi->scrub.rb_node) {
		if (!ubi->used.rb_node || !ubi->free.rb_node)
			/* No physical eraseblocks - no deal */
			goto out_unlock;

		/*
		 * We schedule wear-leveling only if the difference between the
		 * lowest erase counter of used physical eraseblocks and a high
		 * erase counter of free physical eraseblocks is greater than
		 * %UBI_WL_THRESHOLD.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
			goto out_unlock;
		dbg_wl("schedule wear-leveling");
	} else
		dbg_wl("schedule scrubbing");

	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		err = -ENOMEM;
		goto out_cancel;
	}

	wrk->func = &wear_leveling_worker;
	schedule_ubi_work(ubi, wrk);
	return err;

out_cancel:
	spin_lock(&ubi->wl_lock);
	ubi->wl_scheduled = 0;
out_unlock:
	spin_unlock(&ubi->wl_lock);
	return err;
}

/**
 * erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function erases a physical eraseblock and performs torture testing if
 * needed. It also takes care of marking the physical eraseblock bad if
 * needed. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel)
{
	struct ubi_wl_entry *e = wl_wrk->e;
	int pnum = e->pnum, err, need;

	if (cancel) {
		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
		kfree(wl_wrk);
		kmem_cache_free(ubi_wl_entry_slab, e);
		return 0;
	}

	dbg_wl("erase PEB %d EC %d", pnum, e->ec);

	err = sync_erase(ubi, e, wl_wrk->torture);
	if (!err) {
		/* Fine, we've erased it successfully */
		kfree(wl_wrk);

		spin_lock(&ubi->wl_lock);
		ubi->abs_ec += 1;
		wl_tree_add(e, &ubi->free);
		spin_unlock(&ubi->wl_lock);

		/*
		 * One more erase operation has happened, take care of
		 * protected physical eraseblocks.
		 */
		check_protection_over(ubi);

		/* And take care of wear-leveling */
		err = ensure_wear_leveling(ubi);
		return err;
	}

	ubi_err("failed to erase PEB %d, error %d", pnum, err);
	kfree(wl_wrk);

	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
	    err == -EBUSY) {
		int err1;

		/* Re-schedule the LEB for erasure */
		err1 = schedule_erase(ubi, e, 0);
		if (err1) {
			err = err1;
			goto out_ro;
		}
		return err;
	}

	/* Free the entry only now that it cannot be re-scheduled above */
	kmem_cache_free(ubi_wl_entry_slab, e);

	if (err != -EIO) {
		/*
		 * If this is not %-EIO, we have no idea what to do. Scheduling
		 * this physical eraseblock for erasure again would cause
		 * errors again and again. Well, let's switch to RO mode.
		 */
		goto out_ro;
	}

	/* It is %-EIO, the PEB went bad */

	if (!ubi->bad_allowed) {
		ubi_err("bad physical eraseblock %d detected", pnum);
		goto out_ro;
	}

	spin_lock(&ubi->volumes_lock);
	need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
	if (need > 0) {
		need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
		ubi->avail_pebs -= need;
		ubi->rsvd_pebs += need;
		ubi->beb_rsvd_pebs += need;
		if (need > 0)
			ubi_msg("reserve more %d PEBs", need);
	}

	if (ubi->beb_rsvd_pebs == 0) {
		spin_unlock(&ubi->volumes_lock);
		ubi_err("no reserved physical eraseblocks");
		goto out_ro;
	}

	spin_unlock(&ubi->volumes_lock);
	ubi_msg("mark PEB %d as bad", pnum);

	err = ubi_io_mark_bad(ubi, pnum);
	if (err)
		goto out_ro;

	spin_lock(&ubi->volumes_lock);
	ubi->beb_rsvd_pebs -= 1;
	ubi->bad_peb_count += 1;
	ubi->good_peb_count -= 1;
	ubi_calculate_reserved(ubi);
	if (ubi->beb_rsvd_pebs == 0)
		ubi_warn("last PEB from the reserved pool was used");
	spin_unlock(&ubi->volumes_lock);

	return err;

out_ro:
	ubi_ro_mode(ubi);
	return err;
}

/**
 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock to return
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function is called to return physical eraseblock @pnum to the pool of
 * free physical eraseblocks. The @torture flag has to be set if an I/O error
 * occurred to this @pnum and it has to be tested. This function returns zero
 * in case of success, and a negative error code in case of failure.
 */
int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
{
	int err;
	struct ubi_wl_entry *e;

	dbg_wl("PEB %d", pnum);
	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from) {
		/*
		 * User is putting the physical eraseblock which was selected to
		 * be moved. It will be scheduled for erasure in the
		 * wear-leveling worker.
		 */
		dbg_wl("PEB %d is being moved, wait", pnum);
		spin_unlock(&ubi->wl_lock);

		/* Wait for the WL worker by taking the @ubi->move_mutex */
		mutex_lock(&ubi->move_mutex);
		mutex_unlock(&ubi->move_mutex);
		goto retry;
	} else if (e == ubi->move_to) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * as the target the data is moved to. It may happen if the EBA
		 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
		 * but the WL sub-system has not put the PEB to the "used" tree
		 * yet, but it is about to do this. So we just set a flag which
		 * will tell the WL worker that the PEB is not needed anymore
		 * and should be scheduled for erasure.
		 */
		dbg_wl("PEB %d is the target of data moving", pnum);
		ubi_assert(!ubi->move_to_put);
		ubi->move_to_put = 1;
		spin_unlock(&ubi->wl_lock);
		return 0;
	} else {
		if (in_wl_tree(e, &ubi->used)) {
			paranoid_check_in_wl_tree(e, &ubi->used);
			rb_erase(&e->u.rb, &ubi->used);
		} else if (in_wl_tree(e, &ubi->scrub)) {
			paranoid_check_in_wl_tree(e, &ubi->scrub);
			rb_erase(&e->u.rb, &ubi->scrub);
		} else {
			err = prot_tree_del(ubi, e->pnum);
			if (err) {
				ubi_err("PEB %d not found", pnum);
				ubi_ro_mode(ubi);
				spin_unlock(&ubi->wl_lock);
				return err;
			}
		}
	}
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e, torture);
	if (err) {
		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->used);
		spin_unlock(&ubi->wl_lock);
	}

	return err;
}

/**
 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 *
 * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
 * needs scrubbing. This function schedules a physical eraseblock for
 * scrubbing which is done in the background. This function returns zero in
 * case of success and a negative error code in case of failure.
 */
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	dbg_msg("schedule PEB %d for scrubbing", pnum);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	if (e == ubi->move_to) {
		/*
		 * This physical eraseblock was used to move data to. The data
		 * was moved but the PEB was not yet inserted to the proper
		 * tree. We should just wait a little and let the WL worker
		 * proceed.
		 */
		spin_unlock(&ubi->wl_lock);
		dbg_wl("the PEB %d is not in proper tree, retry", pnum);
		yield();
		goto retry;
	}

	if (in_wl_tree(e, &ubi->used)) {
		paranoid_check_in_wl_tree(e, &ubi->used);
		rb_erase(&e->u.rb, &ubi->used);
	} else {
		int err;

		err = prot_tree_del(ubi, e->pnum);
		if (err) {
			ubi_err("PEB %d not found", pnum);
			ubi_ro_mode(ubi);
			spin_unlock(&ubi->wl_lock);
			return err;
		}
	}

	wl_tree_add(e, &ubi->scrub);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Technically scrubbing is the same as wear-leveling, so it is done
	 * by the WL worker.
	 */
	return ensure_wear_leveling(ubi);
}

/**
 * ubi_wl_flush - flush all pending works.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubi_wl_flush(struct ubi_device *ubi)
{
	int err;

	/*
	 * Erase while the pending works queue is not empty, but not more than
	 * the number of currently pending works.
	 */
	dbg_wl("flush (%d pending works)", ubi->works_count);
	while (ubi->works_count) {
		err = do_work(ubi);
		if (err)
			return err;
	}

	/*
	 * Make sure all the works which have been done in parallel are
	 * finished.
	 */
	down_write(&ubi->work_sem);
	up_write(&ubi->work_sem);

	/*
	 * And in case the last one was the WL worker and it canceled the LEB
	 * movement, flush again.
	 */
	while (ubi->works_count) {
		dbg_wl("flush more (%d pending works)", ubi->works_count);
		err = do_work(ubi);
		if (err)
			return err;
	}

	return 0;
}

/**
 * tree_destroy - destroy an RB-tree.
 * @root: the root of the tree to destroy
 */
static void tree_destroy(struct rb_root *root)
{
	struct rb_node *rb;
	struct ubi_wl_entry *e;

	rb = root->rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			e = rb_entry(rb, struct ubi_wl_entry, u.rb);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &e->u.rb)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			kmem_cache_free(ubi_wl_entry_slab, e);
		}
	}
}
1379
1380/**
1381 * ubi_thread - UBI background thread.
1382 * @u: the UBI device description object pointer
1383 */
Artem Bityutskiycdfa7882007-12-17 20:33:20 +02001384int ubi_thread(void *u)
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001385{
1386 int failures = 0;
1387 struct ubi_device *ubi = u;
1388
1389 ubi_msg("background thread \"%s\" started, PID %d",
Pavel Emelyanovba25f9d2007-10-18 23:40:40 -07001390 ubi->bgt_name, task_pid_nr(current));
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001391
	set_freezable();
	for (;;) {
		int err;

		if (kthread_should_stop())
			break;

		if (try_to_freeze())
			continue;

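		/*
		 * Note: TASK_INTERRUPTIBLE is set while wl_lock is still
		 * held, so a wake-up arriving right after the unlock is not
		 * lost: schedule() then returns immediately.
		 */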
		spin_lock(&ubi->wl_lock);
		if (list_empty(&ubi->works) || ubi->ro_mode ||
		    !ubi->thread_enabled) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock(&ubi->wl_lock);
			schedule();
			continue;
		}
		spin_unlock(&ubi->wl_lock);

		err = do_work(ubi);
		if (err) {
			ubi_err("%s: work failed with error code %d",
				ubi->bgt_name, err);
			if (failures++ > WL_MAX_FAILURES) {
				/*
				 * Too many failures, disable the thread and
				 * switch to read-only mode.
				 */
				ubi_msg("%s: %d consecutive failures",
					ubi->bgt_name, WL_MAX_FAILURES);
				ubi_ro_mode(ubi);
				ubi->thread_enabled = 0;
				continue;
			}
		} else
			failures = 0;

		cond_resched();
	}

	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
	return 0;
}

1437/**
1438 * cancel_pending - cancel all pending works.
1439 * @ubi: UBI device description object
1440 */
1441static void cancel_pending(struct ubi_device *ubi)
1442{
1443 while (!list_empty(&ubi->works)) {
1444 struct ubi_work *wrk;
1445
1446 wrk = list_entry(ubi->works.next, struct ubi_work, list);
1447 list_del(&wrk->list);
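		/*
		 * The last argument is the cancel flag: with it set, the
		 * work function only releases its resources instead of
		 * doing the work.
		 */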
		wrk->func(ubi, wrk, 1);
		ubi->works_count -= 1;
		ubi_assert(ubi->works_count >= 0);
	}
}

/**
 * ubi_wl_init_scan - initialize the WL sub-system using scanning information.
 * @ubi: UBI device description object
 * @si: scanning information
 *
 * This function returns zero in case of success, and a negative error code in
 * case of failure.
 */
int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
{
	int err;
	struct rb_node *rb1, *rb2;
	struct ubi_scan_volume *sv;
	struct ubi_scan_leb *seb, *tmp;
	struct ubi_wl_entry *e;

	ubi->used = ubi->free = ubi->scrub = RB_ROOT;
	ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
	spin_lock_init(&ubi->wl_lock);
	mutex_init(&ubi->move_mutex);
	init_rwsem(&ubi->work_sem);
	ubi->max_ec = si->max_ec;
	INIT_LIST_HEAD(&ubi->works);

	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);

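	/* The lookup table maps PEB numbers to their wear-leveling entries */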
	err = -ENOMEM;
	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
	if (!ubi->lookuptbl)
		return err;

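	/* PEBs on the scan "erase" list are scheduled for erasure */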
	list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, 0)) {
			kmem_cache_free(ubi_wl_entry_slab, e);
			goto out_free;
		}
	}

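	/* Free PEBs go straight to the free RB-tree */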
	list_for_each_entry(seb, &si->free, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi_assert(e->ec >= 0);
		wl_tree_add(e, &ubi->free);
		ubi->lookuptbl[e->pnum] = e;
	}

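	/* Corrupted PEBs are scheduled for erasure as well */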
	list_for_each_entry(seb, &si->corr, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, 0)) {
			kmem_cache_free(ubi_wl_entry_slab, e);
			goto out_free;
		}
	}

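	/*
	 * PEBs which belong to volumes go to either the used or the scrub
	 * RB-tree, depending on whether scanning detected bit-flips.
	 */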
	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
			cond_resched();

			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
			if (!e)
				goto out_free;

			e->pnum = seb->pnum;
			e->ec = seb->ec;
			ubi->lookuptbl[e->pnum] = e;
			if (!seb->scrub) {
				dbg_wl("add PEB %d EC %d to the used tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->used);
			} else {
				dbg_wl("add PEB %d EC %d to the scrub tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->scrub);
			}
		}
	}

	if (ubi->avail_pebs < WL_RESERVED_PEBS) {
		ubi_err("not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, WL_RESERVED_PEBS);
		goto out_free;
	}
	ubi->avail_pebs -= WL_RESERVED_PEBS;
	ubi->rsvd_pebs += WL_RESERVED_PEBS;

	/* Schedule wear-leveling if needed */
	err = ensure_wear_leveling(ubi);
	if (err)
		goto out_free;

	return 0;

out_free:
	cancel_pending(ubi);
	tree_destroy(&ubi->used);
	tree_destroy(&ubi->free);
	tree_destroy(&ubi->scrub);
	kfree(ubi->lookuptbl);
	return err;
}

1579/**
1580 * protection_trees_destroy - destroy the protection RB-trees.
1581 * @ubi: UBI device description object
1582 */
1583static void protection_trees_destroy(struct ubi_device *ubi)
1584{
1585 struct rb_node *rb;
1586 struct ubi_wl_prot_entry *pe;
1587
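	/*
	 * Each protection entry is linked into both protection trees, so
	 * walking the erase counter tree alone visits every entry; free
	 * the wear-leveling entry and the protection entry together.
	 */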
	rb = ubi->prot.aec.rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			pe = rb_entry(rb, struct ubi_wl_prot_entry, rb_aec);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &pe->rb_aec)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			kmem_cache_free(ubi_wl_entry_slab, pe->e);
			kfree(pe);
		}
	}
}

1611/**
Artem Bityutskiy85c6e6e2008-07-16 10:25:56 +03001612 * ubi_wl_close - close the wear-leveling sub-system.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001613 * @ubi: UBI device description object
1614 */
1615void ubi_wl_close(struct ubi_device *ubi)
1616{
Artem Bityutskiy85c6e6e2008-07-16 10:25:56 +03001617 dbg_wl("close the WL sub-system");
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001618 cancel_pending(ubi);
1619 protection_trees_destroy(ubi);
1620 tree_destroy(&ubi->used);
1621 tree_destroy(&ubi->free);
1622 tree_destroy(&ubi->scrub);
1623 kfree(ubi->lookuptbl);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001624}
1625
#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID

/**
 * paranoid_check_ec - make sure that the erase counter of a PEB is correct.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @ec: the erase counter to check
 *
 * This function returns zero if the erase counter of physical eraseblock @pnum
 * is equivalent to @ec, %1 if not, and a negative error code if an error
 * occurred.
 */
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
	int err;
	long long read_ec;
	struct ubi_ec_hdr *ec_hdr;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		/* The header does not have to exist */
		err = 0;
		goto out_free;
	}

	read_ec = be64_to_cpu(ec_hdr->ec);
	if (ec != read_ec) {
		ubi_err("paranoid check failed for PEB %d", pnum);
		ubi_err("read EC is %lld, should be %d", read_ec, ec);
		ubi_dbg_dump_stack();
		err = 1;
	} else
		err = 0;

out_free:
	kfree(ec_hdr);
	return err;
}

1669/**
Artem Bityutskiyebaaf1a2008-07-18 13:34:32 +03001670 * paranoid_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001671 * @e: the wear-leveling entry to check
1672 * @root: the root of the tree
1673 *
Artem Bityutskiyebaaf1a2008-07-18 13:34:32 +03001674 * This function returns zero if @e is in the @root RB-tree and %1 if it is
1675 * not.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001676 */
1677static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
1678 struct rb_root *root)
1679{
1680 if (in_wl_tree(e, root))
1681 return 0;
1682
1683 ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p ",
1684 e->pnum, e->ec, root);
1685 ubi_dbg_dump_stack();
1686 return 1;
1687}
1688
1689#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */