Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001/*
2 * Copyright (c) International Business Machines Corp., 2006
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
19 */
20
21/*
Artem Bityutskiy85c6e6e2008-07-16 10:25:56 +030022 * UBI wear-leveling sub-system.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +040023 *
Artem Bityutskiy85c6e6e2008-07-16 10:25:56 +030024 * This sub-system is responsible for wear-leveling. It works in terms of
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +080025 * physical eraseblocks and erase counters and knows nothing about logical
Artem Bityutskiy85c6e6e2008-07-16 10:25:56 +030026 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
27 * eraseblocks are of two types - used and free. Used physical eraseblocks are
28 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
29 * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +040030 *
 31 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase counter
Artem Bityutskiy85c6e6e2008-07-16 10:25:56 +030032 * header. The rest of the physical eraseblock contains only %0xFF bytes.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +040033 *
Artem Bityutskiy85c6e6e2008-07-16 10:25:56 +030034 * When physical eraseblocks are returned to the WL sub-system by means of the
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +040035 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
 36 * done asynchronously in the context of the per-UBI device background thread,
Artem Bityutskiy85c6e6e2008-07-16 10:25:56 +030037 * which is also managed by the WL sub-system.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +040038 *
39 * The wear-leveling is ensured by means of moving the contents of used
40 * physical eraseblocks with low erase counter to free physical eraseblocks
41 * with high erase counter.
42 *
43 * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick
44 * an "optimal" physical eraseblock. For example, when it is known that the
45 * physical eraseblock will be "put" soon because it contains short-term data,
Artem Bityutskiy85c6e6e2008-07-16 10:25:56 +030046 * the WL sub-system may pick a free physical eraseblock with low erase
47 * counter, and so forth.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +040048 *
Artem Bityutskiy85c6e6e2008-07-16 10:25:56 +030049 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
50 * bad.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +040051 *
Artem Bityutskiy85c6e6e2008-07-16 10:25:56 +030052 * This sub-system is also responsible for scrubbing. If a bit-flip is detected
53 * in a physical eraseblock, it has to be moved. Technically this is the same
54 * as moving it for wear-leveling reasons.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +040055 *
Artem Bityutskiy85c6e6e2008-07-16 10:25:56 +030056 * As was said above, for the UBI sub-system all physical eraseblocks are either
 57 * "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree, while
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +080058 * used eraseblocks are kept in @wl->used or @wl->scrub RB-trees, or
59 * (temporarily) in the @wl->pq queue.
60 *
61 * When the WL sub-system returns a physical eraseblock, the physical
62 * eraseblock is protected from being moved for some "time". For this reason,
63 * the physical eraseblock is not directly moved from the @wl->free tree to the
64 * @wl->used tree. There is a protection queue in between where this
65 * physical eraseblock is temporarily stored (@wl->pq).
66 *
67 * All this protection stuff is needed because:
68 * o we don't want to move physical eraseblocks just after we have given them
69 * to the user; instead, we first want to let users fill them up with data;
70 *
71 * o there is a chance that the user will put the physical eraseblock very
72 * soon, so it makes sense not to move it for some time, but wait; this is
73 * especially important in case of "short term" physical eraseblocks.
74 *
 75 * Physical eraseblocks stay protected only for a limited time. But the "time" is
 76 * measured in erase cycles in this case. This is implemented with the help of the
77 * protection queue. Eraseblocks are put to the tail of this queue when they
78 * are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the
79 * head of the queue on each erase operation (for any eraseblock). So the
 80 * length of the queue defines how many (global) erase cycles PEBs are protected.
81 *
82 * To put it differently, each physical eraseblock has 2 main states: free and
83 * used. The former state corresponds to the @wl->free tree. The latter state
 84 * is split up into several sub-states:
85 * o the WL movement is allowed (@wl->used tree);
86 * o the WL movement is temporarily prohibited (@wl->pq queue);
87 * o scrubbing is needed (@wl->scrub tree).
88 *
89 * Depending on the sub-state, wear-leveling entries of the used physical
90 * eraseblocks may be kept in one of those structures.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +040091 *
92 * Note, in this implementation, we keep a small in-RAM object for each physical
93 * eraseblock. This is surely not a scalable solution. But it appears to be good
 94 * enough for moderately large flashes and it is simple. In the future, one may
Artem Bityutskiy85c6e6e2008-07-16 10:25:56 +030095 * re-work this sub-system and make it more scalable.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +040096 *
Artem Bityutskiy85c6e6e2008-07-16 10:25:56 +030097 * At the moment this sub-system does not utilize the sequence number, which
98 * was introduced relatively recently. But it would be wise to do this because
 99 * the sequence number of a logical eraseblock characterizes how old it is. For
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400100 * example, when we move a PEB with a low erase counter and we need to pick the
 101 * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
 102 * pick a target PEB with an average EC if our PEB is not very "old". This is
Artem Bityutskiy85c6e6e2008-07-16 10:25:56 +0300103 * room for future re-work of the WL sub-system.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400104 */
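/*
 * Illustrative sketch (not part of the driver): the life-cycle of a PEB as
 * seen by a caller of this sub-system, e.g. the EBA sub-system. Error
 * handling is omitted and the variable names are hypothetical.
 *
 *	int pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
 *		(the PEB leaves @wl->free and enters the @wl->pq queue)
 *	... the caller writes the VID header and data to PEB pnum ...
 *	err = ubi_wl_put_peb(ubi, pnum, 0);
 *		(the PEB is scheduled for erasure and, once erased by the
 *		 background thread, re-appears in @wl->free)
 */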
105
106#include <linux/slab.h>
107#include <linux/crc32.h>
108#include <linux/freezer.h>
109#include <linux/kthread.h>
110#include "ubi.h"
111
112/* Number of physical eraseblocks reserved for wear-leveling purposes */
113#define WL_RESERVED_PEBS 1
114
115/*
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400116 * Maximum difference between two erase counters. If this threshold is
Artem Bityutskiy85c6e6e2008-07-16 10:25:56 +0300117 * exceeded, the WL sub-system starts moving data from used physical
118 * eraseblocks with low erase counter to free physical eraseblocks with high
119 * erase counter.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400120 */
121#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD
122
123/*
Artem Bityutskiy85c6e6e2008-07-16 10:25:56 +0300124 * When a physical eraseblock is moved, the WL sub-system has to pick the target
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400125 * physical eraseblock to move to. The simplest way would be just to pick the
126 * one with the highest erase counter. But in certain workloads this could lead
 127 * to unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
128 * situation when the picked physical eraseblock is constantly erased after the
129 * data is written to it. So, we have a constant which limits the highest erase
Artem Bityutskiy85c6e6e2008-07-16 10:25:56 +0300130 * counter of the free physical eraseblock to pick. Namely, the WL sub-system
Frederik Schwarzer025dfda2008-10-16 19:02:37 +0200131 * does not pick eraseblocks with erase counter greater than the lowest erase
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400132 * counter plus %WL_FREE_MAX_DIFF.
133 */
134#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
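/*
 * Worked example (illustrative, assuming the common Kconfig default of
 * CONFIG_MTD_UBI_WL_THRESHOLD = 4096): WL_FREE_MAX_DIFF is then 8192. If the
 * lowest erase counter among the free PEBs is 100, 'find_wl_entry()' will
 * never hand out a free PEB whose erase counter is 100 + 8192 = 8292 or
 * higher, no matter how many such PEBs exist.
 */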
135
136/*
 137 * Maximum number of consecutive background thread failures after which UBI
 138 * switches to read-only mode.
139 */
140#define WL_MAX_FAILURES 32
141
142/**
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400143 * struct ubi_work - UBI work description data structure.
144 * @list: a link in the list of pending works
145 * @func: worker function
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400146 * @e: physical eraseblock to erase
147 * @torture: if the physical eraseblock has to be tortured
148 *
149 * The @func pointer points to the worker function. If the @cancel argument is
150 * not zero, the worker has to free the resources and exit immediately. The
151 * worker has to return zero in case of success and a negative error code in
152 * case of failure.
153 */
154struct ubi_work {
155 struct list_head list;
156 int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
157 /* The below fields are only relevant to erasure works */
158 struct ubi_wl_entry *e;
159 int torture;
160};
161
162#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
Artem Bityutskiye88d6e102007-08-29 14:51:52 +0300163static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400164static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
165 struct rb_root *root);
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800166static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400167#else
168#define paranoid_check_ec(ubi, pnum, ec) 0
169#define paranoid_check_in_wl_tree(e, root)
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800170#define paranoid_check_in_pq(ubi, e) 0
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400171#endif
172
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400173/**
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400174 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
175 * @e: the wear-leveling entry to add
176 * @root: the root of the tree
177 *
178 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
179 * the @ubi->used and @ubi->free RB-trees.
180 */
181static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
182{
183 struct rb_node **p, *parent = NULL;
184
185 p = &root->rb_node;
186 while (*p) {
187 struct ubi_wl_entry *e1;
188
189 parent = *p;
Xiaochuan-Xu23553b22008-12-09 19:44:12 +0800190 e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400191
192 if (e->ec < e1->ec)
193 p = &(*p)->rb_left;
194 else if (e->ec > e1->ec)
195 p = &(*p)->rb_right;
196 else {
197 ubi_assert(e->pnum != e1->pnum);
198 if (e->pnum < e1->pnum)
199 p = &(*p)->rb_left;
200 else
201 p = &(*p)->rb_right;
202 }
203 }
204
Xiaochuan-Xu23553b22008-12-09 19:44:12 +0800205 rb_link_node(&e->u.rb, parent, p);
206 rb_insert_color(&e->u.rb, root);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400207}
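/*
 * Illustrative example: the (EC, pnum) key keeps the ordering total even when
 * erase counters collide. Entries with (EC, pnum) pairs (10, 7), (10, 3) and
 * (12, 1) are ordered as
 *
 *	(10, 3) < (10, 7) < (12, 1)
 *
 * so rb_first() always yields the least-worn entry and ties are broken by the
 * physical eraseblock number.
 */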
208
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400209/**
210 * do_work - do one pending work.
211 * @ubi: UBI device description object
212 *
213 * This function returns zero in case of success and a negative error code in
214 * case of failure.
215 */
216static int do_work(struct ubi_device *ubi)
217{
218 int err;
219 struct ubi_work *wrk;
220
Artem Bityutskiy43f9b252007-12-18 15:06:55 +0200221 cond_resched();
222
Artem Bityutskiy593dd332007-12-18 15:54:35 +0200223 /*
224 * @ubi->work_sem is used to synchronize with the workers. Workers take
 225 * it in read mode, so many of them may be doing work at a time. But
 226 * the queue flush code has to be sure the whole queue of works is
 227 * done, and it takes this rw-semaphore in write mode.
228 */
229 down_read(&ubi->work_sem);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400230 spin_lock(&ubi->wl_lock);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400231 if (list_empty(&ubi->works)) {
232 spin_unlock(&ubi->wl_lock);
Artem Bityutskiy593dd332007-12-18 15:54:35 +0200233 up_read(&ubi->work_sem);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400234 return 0;
235 }
236
237 wrk = list_entry(ubi->works.next, struct ubi_work, list);
238 list_del(&wrk->list);
Artem Bityutskiy16f557e2007-12-19 16:03:17 +0200239 ubi->works_count -= 1;
240 ubi_assert(ubi->works_count >= 0);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400241 spin_unlock(&ubi->wl_lock);
242
243 /*
244 * Call the worker function. Do not touch the work structure
245 * after this call as it will have been freed or reused by that
246 * time by the worker function.
247 */
248 err = wrk->func(ubi, wrk, 0);
249 if (err)
250 ubi_err("work failed with error code %d", err);
Artem Bityutskiy593dd332007-12-18 15:54:35 +0200251 up_read(&ubi->work_sem);
Artem Bityutskiy16f557e2007-12-19 16:03:17 +0200252
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400253 return err;
254}
255
256/**
257 * produce_free_peb - produce a free physical eraseblock.
258 * @ubi: UBI device description object
259 *
260 * This function tries to make a free PEB by means of synchronous execution of
 261 * pending works. This may be needed if, for example, the background thread is
262 * disabled. Returns zero in case of success and a negative error code in case
263 * of failure.
264 */
265static int produce_free_peb(struct ubi_device *ubi)
266{
267 int err;
268
269 spin_lock(&ubi->wl_lock);
Artem Bityutskiy5abde382007-09-13 14:48:20 +0300270 while (!ubi->free.rb_node) {
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400271 spin_unlock(&ubi->wl_lock);
272
273 dbg_wl("do one work synchronously");
274 err = do_work(ubi);
275 if (err)
276 return err;
277
278 spin_lock(&ubi->wl_lock);
279 }
280 spin_unlock(&ubi->wl_lock);
281
282 return 0;
283}
284
285/**
286 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
287 * @e: the wear-leveling entry to check
288 * @root: the root of the tree
289 *
290 * This function returns non-zero if @e is in the @root RB-tree and zero if it
291 * is not.
292 */
293static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
294{
295 struct rb_node *p;
296
297 p = root->rb_node;
298 while (p) {
299 struct ubi_wl_entry *e1;
300
Xiaochuan-Xu23553b22008-12-09 19:44:12 +0800301 e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400302
303 if (e->pnum == e1->pnum) {
304 ubi_assert(e == e1);
305 return 1;
306 }
307
308 if (e->ec < e1->ec)
309 p = p->rb_left;
310 else if (e->ec > e1->ec)
311 p = p->rb_right;
312 else {
313 ubi_assert(e->pnum != e1->pnum);
314 if (e->pnum < e1->pnum)
315 p = p->rb_left;
316 else
317 p = p->rb_right;
318 }
319 }
320
321 return 0;
322}
323
324/**
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800325 * prot_queue_add - add physical eraseblock to the protection queue.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400326 * @ubi: UBI device description object
327 * @e: the physical eraseblock to add
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400328 *
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800329 * This function adds @e to the tail of the protection queue @ubi->pq, where
330 * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
 331 * temporarily protected from the wear-leveling worker. Note, @ubi->wl_lock has to
332 * be locked.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400333 */
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800334static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400335{
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800336 int pq_tail = ubi->pq_head - 1;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400337
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800338 if (pq_tail < 0)
339 pq_tail = UBI_PROT_QUEUE_LEN - 1;
340 ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
341 list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
342 dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400343}
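/*
 * Illustrative note: the tail index simply precedes @pq_head modulo
 * %UBI_PROT_QUEUE_LEN. For example, with pq_head == 0 the entry is added to
 * list UBI_PROT_QUEUE_LEN - 1, which 'serve_prot_queue()' will reach last,
 * giving the PEB the full protection period.
 */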
344
345/**
346 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
347 * @root: the RB-tree where to look for
348 * @max: highest possible erase counter
349 *
350 * This function looks for a wear leveling entry with erase counter closest to
 351 * @max and less than @max.
352 */
353static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
354{
355 struct rb_node *p;
356 struct ubi_wl_entry *e;
357
Xiaochuan-Xu23553b22008-12-09 19:44:12 +0800358 e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400359 max += e->ec;
360
361 p = root->rb_node;
362 while (p) {
363 struct ubi_wl_entry *e1;
364
Xiaochuan-Xu23553b22008-12-09 19:44:12 +0800365 e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400366 if (e1->ec >= max)
367 p = p->rb_left;
368 else {
369 p = p->rb_right;
370 e = e1;
371 }
372 }
373
374 return e;
375}
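/*
 * Worked example (hypothetical erase counters): if the tree holds entries
 * with ECs 100, 150, 400 and 900 and @max is 300, the function first rebases
 * @max to 100 + 300 = 400 and then returns the entry with the largest erase
 * counter strictly below 400, i.e. the one with EC 150.
 */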
376
377/**
378 * ubi_wl_get_peb - get a physical eraseblock.
379 * @ubi: UBI device description object
380 * @dtype: type of data which will be stored in this physical eraseblock
381 *
382 * This function returns a physical eraseblock in case of success and a
383 * negative error code in case of failure. Might sleep.
384 */
385int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
386{
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800387 int err, medium_ec;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400388 struct ubi_wl_entry *e, *first, *last;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400389
390 ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
391 dtype == UBI_UNKNOWN);
392
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400393retry:
394 spin_lock(&ubi->wl_lock);
Artem Bityutskiy5abde382007-09-13 14:48:20 +0300395 if (!ubi->free.rb_node) {
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400396 if (ubi->works_count == 0) {
397 ubi_assert(list_empty(&ubi->works));
398 ubi_err("no free eraseblocks");
399 spin_unlock(&ubi->wl_lock);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400400 return -ENOSPC;
401 }
402 spin_unlock(&ubi->wl_lock);
403
404 err = produce_free_peb(ubi);
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800405 if (err < 0)
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400406 return err;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400407 goto retry;
408 }
409
410 switch (dtype) {
Artem Bityutskiy9c9ec142008-07-18 13:19:52 +0300411 case UBI_LONGTERM:
412 /*
413 * For long term data we pick a physical eraseblock with high
414 * erase counter. But the highest erase counter we can pick is
 415 * bounded by the lowest erase counter plus
416 * %WL_FREE_MAX_DIFF.
417 */
418 e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
Artem Bityutskiy9c9ec142008-07-18 13:19:52 +0300419 break;
420 case UBI_UNKNOWN:
421 /*
422 * For unknown data we pick a physical eraseblock with medium
 423 * erase counter. But by no means can we pick a physical
 424 * eraseblock with an erase counter greater than or equal to the
425 * lowest erase counter plus %WL_FREE_MAX_DIFF.
426 */
Xiaochuan-Xu23553b22008-12-09 19:44:12 +0800427 first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry,
428 u.rb);
429 last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400430
Artem Bityutskiy9c9ec142008-07-18 13:19:52 +0300431 if (last->ec - first->ec < WL_FREE_MAX_DIFF)
432 e = rb_entry(ubi->free.rb_node,
Xiaochuan-Xu23553b22008-12-09 19:44:12 +0800433 struct ubi_wl_entry, u.rb);
Artem Bityutskiy9c9ec142008-07-18 13:19:52 +0300434 else {
435 medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
436 e = find_wl_entry(&ubi->free, medium_ec);
437 }
Artem Bityutskiy9c9ec142008-07-18 13:19:52 +0300438 break;
439 case UBI_SHORTTERM:
440 /*
441 * For short term data we pick a physical eraseblock with the
442 * lowest erase counter as we expect it will be erased soon.
443 */
Xiaochuan-Xu23553b22008-12-09 19:44:12 +0800444 e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
Artem Bityutskiy9c9ec142008-07-18 13:19:52 +0300445 break;
446 default:
Artem Bityutskiy9c9ec142008-07-18 13:19:52 +0300447 BUG();
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400448 }
449
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800450 paranoid_check_in_wl_tree(e, &ubi->free);
451
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400452 /*
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800453 * Move the physical eraseblock to the protection queue where it will
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400454 * be protected from being moved for some time.
455 */
Xiaochuan-Xu23553b22008-12-09 19:44:12 +0800456 rb_erase(&e->u.rb, &ubi->free);
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800457 dbg_wl("PEB %d EC %d", e->pnum, e->ec);
458 prot_queue_add(ubi, e);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400459 spin_unlock(&ubi->wl_lock);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400460 return e->pnum;
461}
462
463/**
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800464 * prot_queue_del - remove a physical eraseblock from the protection queue.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400465 * @ubi: UBI device description object
466 * @pnum: the physical eraseblock to remove
Artem Bityutskiy43f9b252007-12-18 15:06:55 +0200467 *
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800468 * This function deletes PEB @pnum from the protection queue and returns zero
469 * in case of success and %-ENODEV if the PEB was not found.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400470 */
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800471static int prot_queue_del(struct ubi_device *ubi, int pnum)
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400472{
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800473 struct ubi_wl_entry *e;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400474
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800475 e = ubi->lookuptbl[pnum];
476 if (!e)
477 return -ENODEV;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400478
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800479 if (paranoid_check_in_pq(ubi, e))
480 return -ENODEV;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400481
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800482 list_del(&e->u.list);
483 dbg_wl("deleted PEB %d from the protection queue", e->pnum);
Artem Bityutskiy43f9b252007-12-18 15:06:55 +0200484 return 0;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400485}
486
487/**
488 * sync_erase - synchronously erase a physical eraseblock.
489 * @ubi: UBI device description object
 490 * @e: the physical eraseblock to erase
491 * @torture: if the physical eraseblock has to be tortured
492 *
493 * This function returns zero in case of success and a negative error code in
494 * case of failure.
495 */
Artem Bityutskiy9c9ec142008-07-18 13:19:52 +0300496static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
497 int torture)
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400498{
499 int err;
500 struct ubi_ec_hdr *ec_hdr;
501 unsigned long long ec = e->ec;
502
503 dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
504
505 err = paranoid_check_ec(ubi, e->pnum, e->ec);
506 if (err > 0)
507 return -EINVAL;
508
Artem Bityutskiy33818bb2007-08-28 21:29:32 +0300509 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400510 if (!ec_hdr)
511 return -ENOMEM;
512
513 err = ubi_io_sync_erase(ubi, e->pnum, torture);
514 if (err < 0)
515 goto out_free;
516
517 ec += err;
518 if (ec > UBI_MAX_ERASECOUNTER) {
519 /*
520 * Erase counter overflow. Upgrade UBI and use 64-bit
521 * erase counters internally.
522 */
523 ubi_err("erase counter overflow at PEB %d, EC %llu",
524 e->pnum, ec);
525 err = -EINVAL;
526 goto out_free;
527 }
528
529 dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);
530
Christoph Hellwig3261ebd2007-05-21 17:41:46 +0300531 ec_hdr->ec = cpu_to_be64(ec);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400532
533 err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
534 if (err)
535 goto out_free;
536
537 e->ec = ec;
538 spin_lock(&ubi->wl_lock);
539 if (e->ec > ubi->max_ec)
540 ubi->max_ec = e->ec;
541 spin_unlock(&ubi->wl_lock);
542
543out_free:
544 kfree(ec_hdr);
545 return err;
546}
547
548/**
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800549 * serve_prot_queue - check if it is time to stop protecting PEBs.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400550 * @ubi: UBI device description object
551 *
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800552 * This function is called after each erase operation and removes PEBs from the
 553 * head of the protection queue. These PEBs have been protected for long enough
554 * and should be moved to the used tree.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400555 */
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800556static void serve_prot_queue(struct ubi_device *ubi)
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400557{
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800558 struct ubi_wl_entry *e, *tmp;
559 int count;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400560
561 /*
 562 * There may be several protected physical eraseblocks to remove,
563 * process them all.
564 */
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800565repeat:
566 count = 0;
567 spin_lock(&ubi->wl_lock);
568 list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
569 dbg_wl("PEB %d EC %d protection over, move to used tree",
570 e->pnum, e->ec);
571
572 list_del(&e->u.list);
573 wl_tree_add(e, &ubi->used);
574 if (count++ > 32) {
575 /*
576 * Let's be nice and avoid holding the spinlock for
577 * too long.
578 */
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400579 spin_unlock(&ubi->wl_lock);
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800580 cond_resched();
581 goto repeat;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400582 }
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400583 }
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800584
585 ubi->pq_head += 1;
586 if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
587 ubi->pq_head = 0;
588 ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
589 spin_unlock(&ubi->wl_lock);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400590}
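/*
 * Illustrative note: since this function advances @pq_head by one per erase
 * operation, a PEB appended by 'prot_queue_add()' stays protected for about
 * %UBI_PROT_QUEUE_LEN global erase operations (e.g. 10 erasures if
 * UBI_PROT_QUEUE_LEN is 10), regardless of which PEBs those erasures hit.
 */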
591
592/**
593 * schedule_ubi_work - schedule a work.
594 * @ubi: UBI device description object
595 * @wrk: the work to schedule
596 *
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800597 * This function adds a work defined by @wrk to the tail of the pending works
598 * list.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400599 */
600static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
601{
602 spin_lock(&ubi->wl_lock);
603 list_add_tail(&wrk->list, &ubi->works);
604 ubi_assert(ubi->works_count >= 0);
605 ubi->works_count += 1;
606 if (ubi->thread_enabled)
607 wake_up_process(ubi->bgt_thread);
608 spin_unlock(&ubi->wl_lock);
609}
610
611static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
612 int cancel);
613
614/**
615 * schedule_erase - schedule an erase work.
616 * @ubi: UBI device description object
617 * @e: the WL entry of the physical eraseblock to erase
618 * @torture: if the physical eraseblock has to be tortured
619 *
 620 * This function returns zero in case of success and %-ENOMEM in case of
621 * failure.
622 */
623static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
624 int torture)
625{
626 struct ubi_work *wl_wrk;
627
628 dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
629 e->pnum, e->ec, torture);
630
Artem Bityutskiy33818bb2007-08-28 21:29:32 +0300631 wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400632 if (!wl_wrk)
633 return -ENOMEM;
634
635 wl_wrk->func = &erase_worker;
636 wl_wrk->e = e;
637 wl_wrk->torture = torture;
638
639 schedule_ubi_work(ubi, wl_wrk);
640 return 0;
641}
642
643/**
644 * wear_leveling_worker - wear-leveling worker function.
645 * @ubi: UBI device description object
646 * @wrk: the work object
647 * @cancel: non-zero if the worker has to free memory and exit
648 *
649 * This function copies a more worn out physical eraseblock to a less worn out
650 * one. Returns zero in case of success and a negative error code in case of
651 * failure.
652 */
653static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
654 int cancel)
655{
Artem Bityutskiy6fa6f5b2008-12-05 13:37:02 +0200656 int err, scrubbing = 0, torture = 0;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400657 struct ubi_wl_entry *e1, *e2;
658 struct ubi_vid_hdr *vid_hdr;
659
660 kfree(wrk);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400661 if (cancel)
662 return 0;
663
Artem Bityutskiy33818bb2007-08-28 21:29:32 +0300664 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400665 if (!vid_hdr)
666 return -ENOMEM;
667
Artem Bityutskiy43f9b252007-12-18 15:06:55 +0200668 mutex_lock(&ubi->move_mutex);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400669 spin_lock(&ubi->wl_lock);
Artem Bityutskiy43f9b252007-12-18 15:06:55 +0200670 ubi_assert(!ubi->move_from && !ubi->move_to);
671 ubi_assert(!ubi->move_to_put);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400672
Artem Bityutskiy43f9b252007-12-18 15:06:55 +0200673 if (!ubi->free.rb_node ||
Artem Bityutskiy5abde382007-09-13 14:48:20 +0300674 (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400675 /*
Artem Bityutskiy43f9b252007-12-18 15:06:55 +0200676 * No free physical eraseblocks? Well, they must be waiting in
677 * the queue to be erased. Cancel movement - it will be
678 * triggered again when a free physical eraseblock appears.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400679 *
680 * No used physical eraseblocks? They must be temporarily
681 * protected from being moved. They will be moved to the
682 * @ubi->used tree later and the wear-leveling will be
683 * triggered again.
684 */
685 dbg_wl("cancel WL, a list is empty: free %d, used %d",
Artem Bityutskiy5abde382007-09-13 14:48:20 +0300686 !ubi->free.rb_node, !ubi->used.rb_node);
Artem Bityutskiy43f9b252007-12-18 15:06:55 +0200687 goto out_cancel;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400688 }
689
Artem Bityutskiy5abde382007-09-13 14:48:20 +0300690 if (!ubi->scrub.rb_node) {
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400691 /*
692 * Now pick the least worn-out used physical eraseblock and a
693 * highly worn-out free physical eraseblock. If the erase
 694 * counters differ enough, start wear-leveling.
695 */
Xiaochuan-Xu23553b22008-12-09 19:44:12 +0800696 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400697 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
698
699 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
700 dbg_wl("no WL needed: min used EC %d, max free EC %d",
701 e1->ec, e2->ec);
Artem Bityutskiy43f9b252007-12-18 15:06:55 +0200702 goto out_cancel;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400703 }
Artem Bityutskiy5abde382007-09-13 14:48:20 +0300704 paranoid_check_in_wl_tree(e1, &ubi->used);
Xiaochuan-Xu23553b22008-12-09 19:44:12 +0800705 rb_erase(&e1->u.rb, &ubi->used);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400706 dbg_wl("move PEB %d EC %d to PEB %d EC %d",
707 e1->pnum, e1->ec, e2->pnum, e2->ec);
708 } else {
Artem Bityutskiy43f9b252007-12-18 15:06:55 +0200709 /* Perform scrubbing */
710 scrubbing = 1;
Xiaochuan-Xu23553b22008-12-09 19:44:12 +0800711 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400712 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
Artem Bityutskiy5abde382007-09-13 14:48:20 +0300713 paranoid_check_in_wl_tree(e1, &ubi->scrub);
Xiaochuan-Xu23553b22008-12-09 19:44:12 +0800714 rb_erase(&e1->u.rb, &ubi->scrub);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400715 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
716 }
717
Artem Bityutskiy5abde382007-09-13 14:48:20 +0300718 paranoid_check_in_wl_tree(e2, &ubi->free);
Xiaochuan-Xu23553b22008-12-09 19:44:12 +0800719 rb_erase(&e2->u.rb, &ubi->free);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400720 ubi->move_from = e1;
721 ubi->move_to = e2;
722 spin_unlock(&ubi->wl_lock);
723
724 /*
725 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
726 * We so far do not know which logical eraseblock our physical
727 * eraseblock (@e1) belongs to. We have to read the volume identifier
728 * header first.
Artem Bityutskiy43f9b252007-12-18 15:06:55 +0200729 *
730 * Note, we are protected from this PEB being unmapped and erased. The
731 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
732 * which is being moved was unmapped.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400733 */
734
735 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
736 if (err && err != UBI_IO_BITFLIPS) {
737 if (err == UBI_IO_PEB_FREE) {
738 /*
 739 * We are trying to move a PEB without a VID header. UBI
 740 * always writes VID headers shortly after the PEB was
 741 * given, so we have a situation when it did not have a
 742 * chance to write it down because it was preempted.
743 * Just re-schedule the work, so that next time it will
744 * likely have the VID header in place.
745 */
746 dbg_wl("PEB %d has no VID header", e1->pnum);
Artem Bityutskiy43f9b252007-12-18 15:06:55 +0200747 goto out_not_moved;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400748 }
Artem Bityutskiy43f9b252007-12-18 15:06:55 +0200749
750 ubi_err("error %d while reading VID header from PEB %d",
751 err, e1->pnum);
752 if (err > 0)
753 err = -EIO;
754 goto out_error;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400755 }
756
757 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
758 if (err) {
Artem Bityutskiy90bf0262009-05-23 16:04:17 +0300759 if (err == MOVE_CANCEL_BITFLIPS ||
760 err == MOVE_TARGET_WR_ERR) {
761 /* Target PEB bit-flips or write error, torture it */
Artem Bityutskiy6fa6f5b2008-12-05 13:37:02 +0200762 torture = 1;
Artem Bityutskiy43f9b252007-12-18 15:06:55 +0200763 goto out_not_moved;
Artem Bityutskiy6fa6f5b2008-12-05 13:37:02 +0200764 }
Artem Bityutskiy90bf0262009-05-23 16:04:17 +0300765 if (err < 0)
766 goto out_error;
Artem Bityutskiy43f9b252007-12-18 15:06:55 +0200767
768 /*
Artem Bityutskiy6fa6f5b2008-12-05 13:37:02 +0200769 * The LEB has not been moved because the volume is being
770 * deleted or the PEB has been put meanwhile. We should prevent
771 * this PEB from being selected for wear-leveling movement
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800772 * again, so put it into the protection queue.
Artem Bityutskiy43f9b252007-12-18 15:06:55 +0200773 */
774
Artem Bityutskiy6fa6f5b2008-12-05 13:37:02 +0200775 dbg_wl("canceled moving PEB %d", e1->pnum);
Artem Bityutskiy90bf0262009-05-23 16:04:17 +0300776 ubi_assert(err == MOVE_CANCEL_RACE);
Artem Bityutskiy6fa6f5b2008-12-05 13:37:02 +0200777
Artem Bityutskiy6a8f4832008-12-05 12:23:48 +0200778 ubi_free_vid_hdr(ubi, vid_hdr);
Artem Bityutskiy3c98b0a2008-12-05 12:42:45 +0200779 vid_hdr = NULL;
780
Artem Bityutskiy6a8f4832008-12-05 12:23:48 +0200781 spin_lock(&ubi->wl_lock);
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800782 prot_queue_add(ubi, e1);
Artem Bityutskiy6a8f4832008-12-05 12:23:48 +0200783 ubi_assert(!ubi->move_to_put);
784 ubi->move_from = ubi->move_to = NULL;
785 ubi->wl_scheduled = 0;
786 spin_unlock(&ubi->wl_lock);
787
Artem Bityutskiy3c98b0a2008-12-05 12:42:45 +0200788 e1 = NULL;
Artem Bityutskiy6a8f4832008-12-05 12:23:48 +0200789 err = schedule_erase(ubi, e2, 0);
790 if (err)
791 goto out_error;
792 mutex_unlock(&ubi->move_mutex);
793 return 0;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400794 }
795
Artem Bityutskiy6a8f4832008-12-05 12:23:48 +0200796 /* The PEB has been successfully moved */
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400797 ubi_free_vid_hdr(ubi, vid_hdr);
Artem Bityutskiy3c98b0a2008-12-05 12:42:45 +0200798 vid_hdr = NULL;
Artem Bityutskiy6a8f4832008-12-05 12:23:48 +0200799 if (scrubbing)
Artem Bityutskiy8c1e6ee2008-07-18 12:20:23 +0300800 ubi_msg("scrubbed PEB %d, data moved to PEB %d",
801 e1->pnum, e2->pnum);
802
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400803 spin_lock(&ubi->wl_lock);
Artem Bityutskiy3c98b0a2008-12-05 12:42:45 +0200804 if (!ubi->move_to_put) {
Artem Bityutskiy5abde382007-09-13 14:48:20 +0300805 wl_tree_add(e2, &ubi->used);
Artem Bityutskiy3c98b0a2008-12-05 12:42:45 +0200806 e2 = NULL;
807 }
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400808 ubi->move_from = ubi->move_to = NULL;
Artem Bityutskiy43f9b252007-12-18 15:06:55 +0200809 ubi->move_to_put = ubi->wl_scheduled = 0;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400810 spin_unlock(&ubi->wl_lock);
811
Artem Bityutskiy6a8f4832008-12-05 12:23:48 +0200812 err = schedule_erase(ubi, e1, 0);
Artem Bityutskiy3c98b0a2008-12-05 12:42:45 +0200813 if (err) {
814 e1 = NULL;
Artem Bityutskiy6a8f4832008-12-05 12:23:48 +0200815 goto out_error;
Artem Bityutskiy3c98b0a2008-12-05 12:42:45 +0200816 }
Artem Bityutskiy6a8f4832008-12-05 12:23:48 +0200817
Artem Bityutskiy3c98b0a2008-12-05 12:42:45 +0200818 if (e2) {
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400819 /*
820 * Well, the target PEB was put meanwhile, schedule it for
821 * erasure.
822 */
823 dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
824 err = schedule_erase(ubi, e2, 0);
Artem Bityutskiy43f9b252007-12-18 15:06:55 +0200825 if (err)
826 goto out_error;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400827 }
828
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400829 dbg_wl("done");
Artem Bityutskiy43f9b252007-12-18 15:06:55 +0200830 mutex_unlock(&ubi->move_mutex);
831 return 0;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400832
833 /*
Artem Bityutskiy43f9b252007-12-18 15:06:55 +0200834 * For some reason the LEB was not moved, might be an error, might be
835 * something else. @e1 was not changed, so return it back. @e2 might
Artem Bityutskiy6fa6f5b2008-12-05 13:37:02 +0200836 * have been changed, schedule it for erasure.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400837 */
Artem Bityutskiy43f9b252007-12-18 15:06:55 +0200838out_not_moved:
Artem Bityutskiy6fa6f5b2008-12-05 13:37:02 +0200839 dbg_wl("canceled moving PEB %d", e1->pnum);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400840 ubi_free_vid_hdr(ubi, vid_hdr);
Artem Bityutskiy3c98b0a2008-12-05 12:42:45 +0200841 vid_hdr = NULL;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400842 spin_lock(&ubi->wl_lock);
Artem Bityutskiy43f9b252007-12-18 15:06:55 +0200843 if (scrubbing)
844 wl_tree_add(e1, &ubi->scrub);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400845 else
Artem Bityutskiy5abde382007-09-13 14:48:20 +0300846 wl_tree_add(e1, &ubi->used);
Artem Bityutskiy6fa6f5b2008-12-05 13:37:02 +0200847 ubi_assert(!ubi->move_to_put);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400848 ubi->move_from = ubi->move_to = NULL;
Artem Bityutskiy6fa6f5b2008-12-05 13:37:02 +0200849 ubi->wl_scheduled = 0;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400850 spin_unlock(&ubi->wl_lock);
851
Artem Bityutskiy3c98b0a2008-12-05 12:42:45 +0200852 e1 = NULL;
Artem Bityutskiy6fa6f5b2008-12-05 13:37:02 +0200853 err = schedule_erase(ubi, e2, torture);
Artem Bityutskiy43f9b252007-12-18 15:06:55 +0200854 if (err)
855 goto out_error;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400856
Artem Bityutskiy43f9b252007-12-18 15:06:55 +0200857 mutex_unlock(&ubi->move_mutex);
858 return 0;
859
860out_error:
861 ubi_err("error %d while moving PEB %d to PEB %d",
862 err, e1->pnum, e2->pnum);
863
864 ubi_free_vid_hdr(ubi, vid_hdr);
865 spin_lock(&ubi->wl_lock);
866 ubi->move_from = ubi->move_to = NULL;
867 ubi->move_to_put = ubi->wl_scheduled = 0;
868 spin_unlock(&ubi->wl_lock);
869
Artem Bityutskiy3c98b0a2008-12-05 12:42:45 +0200870 if (e1)
871 kmem_cache_free(ubi_wl_entry_slab, e1);
872 if (e2)
873 kmem_cache_free(ubi_wl_entry_slab, e2);
Artem Bityutskiy43f9b252007-12-18 15:06:55 +0200874 ubi_ro_mode(ubi);
875
876 mutex_unlock(&ubi->move_mutex);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400877 return err;
Artem Bityutskiy43f9b252007-12-18 15:06:55 +0200878
879out_cancel:
880 ubi->wl_scheduled = 0;
881 spin_unlock(&ubi->wl_lock);
882 mutex_unlock(&ubi->move_mutex);
883 ubi_free_vid_hdr(ubi, vid_hdr);
884 return 0;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400885}
886
887/**
888 * ensure_wear_leveling - schedule wear-leveling if it is needed.
889 * @ubi: UBI device description object
890 *
891 * This function checks if it is time to start wear-leveling and schedules it
892 * if yes. This function returns zero in case of success and a negative error
893 * code in case of failure.
894 */
895static int ensure_wear_leveling(struct ubi_device *ubi)
896{
897 int err = 0;
898 struct ubi_wl_entry *e1;
899 struct ubi_wl_entry *e2;
900 struct ubi_work *wrk;
901
902 spin_lock(&ubi->wl_lock);
903 if (ubi->wl_scheduled)
904 /* Wear-leveling is already in the work queue */
905 goto out_unlock;
906
907 /*
908 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
 909 * WL worker has to be scheduled anyway.
910 */
Artem Bityutskiy5abde382007-09-13 14:48:20 +0300911 if (!ubi->scrub.rb_node) {
912 if (!ubi->used.rb_node || !ubi->free.rb_node)
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400913 /* No physical eraseblocks - no deal */
914 goto out_unlock;
915
916 /*
917 * We schedule wear-leveling only if the difference between the
918 * lowest erase counter of used physical eraseblocks and a high
Frederik Schwarzer025dfda2008-10-16 19:02:37 +0200919 * erase counter of free physical eraseblocks is greater than
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400920 * %UBI_WL_THRESHOLD.
921 */
Xiaochuan-Xu23553b22008-12-09 19:44:12 +0800922 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400923 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
924
925 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
926 goto out_unlock;
927 dbg_wl("schedule wear-leveling");
928 } else
929 dbg_wl("schedule scrubbing");
930
931 ubi->wl_scheduled = 1;
932 spin_unlock(&ubi->wl_lock);
933
Artem Bityutskiy33818bb2007-08-28 21:29:32 +0300934 wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400935 if (!wrk) {
936 err = -ENOMEM;
937 goto out_cancel;
938 }
939
940 wrk->func = &wear_leveling_worker;
941 schedule_ubi_work(ubi, wrk);
942 return err;
943
944out_cancel:
945 spin_lock(&ubi->wl_lock);
946 ubi->wl_scheduled = 0;
947out_unlock:
948 spin_unlock(&ubi->wl_lock);
949 return err;
950}
951
952/**
953 * erase_worker - physical eraseblock erase worker function.
954 * @ubi: UBI device description object
955 * @wl_wrk: the work object
956 * @cancel: non-zero if the worker has to free memory and exit
957 *
 958 * This function erases a physical eraseblock and performs torture testing if
 959 * needed. It also takes care of marking the physical eraseblock bad if
960 * needed. Returns zero in case of success and a negative error code in case of
961 * failure.
962 */
963static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
964 int cancel)
965{
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400966 struct ubi_wl_entry *e = wl_wrk->e;
Artem Bityutskiy784c1452007-07-18 13:42:10 +0300967 int pnum = e->pnum, err, need;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400968
969 if (cancel) {
970 dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
971 kfree(wl_wrk);
Artem Bityutskiy06b68ba2007-12-16 12:49:01 +0200972 kmem_cache_free(ubi_wl_entry_slab, e);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400973 return 0;
974 }
975
976 dbg_wl("erase PEB %d EC %d", pnum, e->ec);
977
978 err = sync_erase(ubi, e, wl_wrk->torture);
979 if (!err) {
980 /* Fine, we've erased it successfully */
981 kfree(wl_wrk);
982
983 spin_lock(&ubi->wl_lock);
Artem Bityutskiy5abde382007-09-13 14:48:20 +0300984 wl_tree_add(e, &ubi->free);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400985 spin_unlock(&ubi->wl_lock);
986
987 /*
Artem Bityutskiy9c9ec142008-07-18 13:19:52 +0300988 * One more erase operation has happened, take care of
989 * protected physical eraseblocks.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400990 */
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +0800991 serve_prot_queue(ubi);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400992
993 /* And take care about wear-leveling */
994 err = ensure_wear_leveling(ubi);
995 return err;
996 }
997
Artem Bityutskiy8d2d4012007-07-22 22:32:51 +0300998 ubi_err("failed to erase PEB %d, error %d", pnum, err);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +0400999 kfree(wl_wrk);
Artem Bityutskiy06b68ba2007-12-16 12:49:01 +02001000 kmem_cache_free(ubi_wl_entry_slab, e);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001001
Artem Bityutskiy784c1452007-07-18 13:42:10 +03001002 if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
1003 err == -EBUSY) {
1004 int err1;
1005
 1006 /* Re-schedule the PEB for erasure */
1007 err1 = schedule_erase(ubi, e, 0);
1008 if (err1) {
1009 err = err1;
1010 goto out_ro;
1011 }
1012 return err;
1013 } else if (err != -EIO) {
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001014 /*
1015 * If this is not %-EIO, we have no idea what to do. Scheduling
1016 * this physical eraseblock for erasure again would cause
 1017 * errors again and again. Well, let's switch to RO mode.
1018 */
Artem Bityutskiy784c1452007-07-18 13:42:10 +03001019 goto out_ro;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001020 }
1021
1022 /* It is %-EIO, the PEB went bad */
1023
1024 if (!ubi->bad_allowed) {
1025 ubi_err("bad physical eraseblock %d detected", pnum);
Artem Bityutskiy784c1452007-07-18 13:42:10 +03001026 goto out_ro;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001027 }
1028
Artem Bityutskiy784c1452007-07-18 13:42:10 +03001029 spin_lock(&ubi->volumes_lock);
1030 need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
1031 if (need > 0) {
1032 need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
1033 ubi->avail_pebs -= need;
1034 ubi->rsvd_pebs += need;
1035 ubi->beb_rsvd_pebs += need;
1036 if (need > 0)
1037 ubi_msg("reserve more %d PEBs", need);
1038 }
1039
1040 if (ubi->beb_rsvd_pebs == 0) {
1041 spin_unlock(&ubi->volumes_lock);
1042 ubi_err("no reserved physical eraseblocks");
1043 goto out_ro;
1044 }
1045
1046 spin_unlock(&ubi->volumes_lock);
1047 ubi_msg("mark PEB %d as bad", pnum);
1048
1049 err = ubi_io_mark_bad(ubi, pnum);
1050 if (err)
1051 goto out_ro;
1052
1053 spin_lock(&ubi->volumes_lock);
1054 ubi->beb_rsvd_pebs -= 1;
1055 ubi->bad_peb_count += 1;
1056 ubi->good_peb_count -= 1;
1057 ubi_calculate_reserved(ubi);
1058 if (ubi->beb_rsvd_pebs == 0)
1059 ubi_warn("last PEB from the reserved pool was used");
1060 spin_unlock(&ubi->volumes_lock);
1061
1062 return err;
1063
1064out_ro:
1065 ubi_ro_mode(ubi);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001066 return err;
1067}
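/*
 * Worked example of the reserve accounting above (hypothetical numbers):
 * assume @beb_rsvd_level is 20, the bad-PEB reserve is exhausted
 * (@beb_rsvd_pebs == 0) and @avail_pebs is 5. Then need = 20 - 0 + 1 = 21,
 * but only min(21, 5) = 5 PEBs can be moved from @avail_pebs to the reserve
 * before the new bad PEB is marked and accounted.
 */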
1068
1069/**
Artem Bityutskiy85c6e6e2008-07-16 10:25:56 +03001070 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001071 * @ubi: UBI device description object
1072 * @pnum: physical eraseblock to return
1073 * @torture: if this physical eraseblock has to be tortured
1074 *
1075 * This function is called to return physical eraseblock @pnum to the pool of
1076 * free physical eraseblocks. The @torture flag has to be set if an I/O error
1077 * occurred to this @pnum and it has to be tested. This function returns zero
Artem Bityutskiy43f9b252007-12-18 15:06:55 +02001078 * in case of success, and a negative error code in case of failure.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001079 */
1080int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
1081{
1082 int err;
1083 struct ubi_wl_entry *e;
1084
1085 dbg_wl("PEB %d", pnum);
1086 ubi_assert(pnum >= 0);
1087 ubi_assert(pnum < ubi->peb_count);
1088
Artem Bityutskiy43f9b252007-12-18 15:06:55 +02001089retry:
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001090 spin_lock(&ubi->wl_lock);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001091 e = ubi->lookuptbl[pnum];
1092 if (e == ubi->move_from) {
1093 /*
1094 * User is putting the physical eraseblock which was selected to
1095 * be moved. It will be scheduled for erasure in the
1096 * wear-leveling worker.
1097 */
Artem Bityutskiy43f9b252007-12-18 15:06:55 +02001098 dbg_wl("PEB %d is being moved, wait", pnum);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001099 spin_unlock(&ubi->wl_lock);
Artem Bityutskiy43f9b252007-12-18 15:06:55 +02001100
1101 /* Wait for the WL worker by taking the @ubi->move_mutex */
1102 mutex_lock(&ubi->move_mutex);
1103 mutex_unlock(&ubi->move_mutex);
1104 goto retry;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001105 } else if (e == ubi->move_to) {
1106 /*
1107 * User is putting the physical eraseblock which was selected
1108 * as the target the data is moved to. It may happen if the EBA
Artem Bityutskiy85c6e6e2008-07-16 10:25:56 +03001109 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
1110 * but the WL sub-system has not put the PEB to the "used" tree
1111 * yet, but it is about to do this. So we just set a flag which
1112 * will tell the WL worker that the PEB is not needed anymore
1113 * and should be scheduled for erasure.
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001114 */
1115 dbg_wl("PEB %d is the target of data moving", pnum);
1116 ubi_assert(!ubi->move_to_put);
1117 ubi->move_to_put = 1;
1118 spin_unlock(&ubi->wl_lock);
1119 return 0;
1120 } else {
Artem Bityutskiy5abde382007-09-13 14:48:20 +03001121 if (in_wl_tree(e, &ubi->used)) {
1122 paranoid_check_in_wl_tree(e, &ubi->used);
Xiaochuan-Xu23553b22008-12-09 19:44:12 +08001123 rb_erase(&e->u.rb, &ubi->used);
Artem Bityutskiy5abde382007-09-13 14:48:20 +03001124 } else if (in_wl_tree(e, &ubi->scrub)) {
1125 paranoid_check_in_wl_tree(e, &ubi->scrub);
Xiaochuan-Xu23553b22008-12-09 19:44:12 +08001126 rb_erase(&e->u.rb, &ubi->scrub);
Artem Bityutskiy43f9b252007-12-18 15:06:55 +02001127 } else {
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +08001128 err = prot_queue_del(ubi, e->pnum);
Artem Bityutskiy43f9b252007-12-18 15:06:55 +02001129 if (err) {
1130 ubi_err("PEB %d not found", pnum);
1131 ubi_ro_mode(ubi);
1132 spin_unlock(&ubi->wl_lock);
1133 return err;
1134 }
1135 }
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001136 }
1137 spin_unlock(&ubi->wl_lock);
1138
1139 err = schedule_erase(ubi, e, torture);
1140 if (err) {
1141 spin_lock(&ubi->wl_lock);
Artem Bityutskiy5abde382007-09-13 14:48:20 +03001142 wl_tree_add(e, &ubi->used);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001143 spin_unlock(&ubi->wl_lock);
1144 }
1145
1146 return err;
1147}
1148
1149/**
1150 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
1151 * @ubi: UBI device description object
1152 * @pnum: the physical eraseblock to schedule
1153 *
1154 * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
1155 * needs scrubbing. This function schedules a physical eraseblock for
1156 * scrubbing which is done in background. This function returns zero in case of
1157 * success and a negative error code in case of failure.
1158 */
1159int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1160{
1161 struct ubi_wl_entry *e;
1162
Artem Bityutskiy8c1e6ee2008-07-18 12:20:23 +03001163 dbg_msg("schedule PEB %d for scrubbing", pnum);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001164
1165retry:
1166 spin_lock(&ubi->wl_lock);
1167 e = ubi->lookuptbl[pnum];
1168 if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) {
1169 spin_unlock(&ubi->wl_lock);
1170 return 0;
1171 }
1172
1173 if (e == ubi->move_to) {
1174 /*
1175 * This physical eraseblock was used to move data to. The data
 1176 * was moved but the PEB was not yet inserted into the proper
1177 * tree. We should just wait a little and let the WL worker
1178 * proceed.
1179 */
1180 spin_unlock(&ubi->wl_lock);
1181 dbg_wl("the PEB %d is not in proper tree, retry", pnum);
1182 yield();
1183 goto retry;
1184 }
1185
Artem Bityutskiy5abde382007-09-13 14:48:20 +03001186 if (in_wl_tree(e, &ubi->used)) {
1187 paranoid_check_in_wl_tree(e, &ubi->used);
Xiaochuan-Xu23553b22008-12-09 19:44:12 +08001188 rb_erase(&e->u.rb, &ubi->used);
Artem Bityutskiy43f9b252007-12-18 15:06:55 +02001189 } else {
1190 int err;
1191
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +08001192 err = prot_queue_del(ubi, e->pnum);
Artem Bityutskiy43f9b252007-12-18 15:06:55 +02001193 if (err) {
1194 ubi_err("PEB %d not found", pnum);
1195 ubi_ro_mode(ubi);
1196 spin_unlock(&ubi->wl_lock);
1197 return err;
1198 }
1199 }
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001200
Artem Bityutskiy5abde382007-09-13 14:48:20 +03001201 wl_tree_add(e, &ubi->scrub);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001202 spin_unlock(&ubi->wl_lock);
1203
1204 /*
1205 * Technically scrubbing is the same as wear-leveling, so it is done
1206 * by the WL worker.
1207 */
1208 return ensure_wear_leveling(ubi);
1209}
1210
1211/**
1212 * ubi_wl_flush - flush all pending works.
1213 * @ubi: UBI device description object
1214 *
1215 * This function returns zero in case of success and a negative error code in
1216 * case of failure.
1217 */
1218int ubi_wl_flush(struct ubi_device *ubi)
1219{
Artem Bityutskiy593dd332007-12-18 15:54:35 +02001220 int err;
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001221
1222 /*
Xiaochuan-Xu7b6c32d2008-12-15 21:07:41 +08001223 * Process pending works while the queue is not empty, but not more times
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001224 * than the number of currently pending works.
1225 */
Artem Bityutskiy593dd332007-12-18 15:54:35 +02001226 dbg_wl("flush (%d pending works)", ubi->works_count);
1227 while (ubi->works_count) {
1228 err = do_work(ubi);
1229 if (err)
1230 return err;
1231 }
1232
1233 /*
1234 * Make sure all the works which have been done in parallel are
1235 * finished.
1236 */
1237 down_write(&ubi->work_sem);
1238 up_write(&ubi->work_sem);
1239
1240 /*
Artem Bityutskiy6fa6f5b2008-12-05 13:37:02 +02001241 * And in case the last one was the WL worker and it canceled the LEB
Artem Bityutskiy593dd332007-12-18 15:54:35 +02001242 * movement, flush again.
1243 */
1244 while (ubi->works_count) {
1245 dbg_wl("flush more (%d pending works)", ubi->works_count);
Artem B. Bityutskiy801c1352006-06-27 12:22:22 +04001246 err = do_work(ubi);
1247 if (err)
1248 return err;
1249 }
1250
1251 return 0;
1252}
1253
1254/**
1255 * tree_destroy - destroy an RB-tree.
1256 * @root: the root of the tree to destroy
1257 */
1258static void tree_destroy(struct rb_root *root)
1259{
1260 struct rb_node *rb;
1261 struct ubi_wl_entry *e;
1262
	rb = root->rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			e = rb_entry(rb, struct ubi_wl_entry, u.rb);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &e->u.rb)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			kmem_cache_free(ubi_wl_entry_slab, e);
		}
	}
}

/**
 * ubi_thread - UBI background thread.
 * @u: the UBI device description object pointer
 */
int ubi_thread(void *u)
{
	int failures = 0;
	struct ubi_device *ubi = u;

	ubi_msg("background thread \"%s\" started, PID %d",
		ubi->bgt_name, task_pid_nr(current));

	set_freezable();
	for (;;) {
		int err;

		if (kthread_should_stop())
			break;

		if (try_to_freeze())
			continue;

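		/*
		 * Sleep if there is nothing to do: the work list is empty,
		 * the device is in read-only mode, or the thread has been
		 * disabled. It is woken up again when new work is scheduled.
		 */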
		spin_lock(&ubi->wl_lock);
		if (list_empty(&ubi->works) || ubi->ro_mode ||
		    !ubi->thread_enabled) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock(&ubi->wl_lock);
			schedule();
			continue;
		}
		spin_unlock(&ubi->wl_lock);

		err = do_work(ubi);
		if (err) {
			ubi_err("%s: work failed with error code %d",
				ubi->bgt_name, err);
			if (failures++ > WL_MAX_FAILURES) {
				/*
				 * Too many failures, disable the thread and
				 * switch to read-only mode.
				 */
				ubi_msg("%s: %d consecutive failures",
					ubi->bgt_name, WL_MAX_FAILURES);
				ubi_ro_mode(ubi);
				ubi->thread_enabled = 0;
				continue;
			}
		} else
			failures = 0;

		cond_resched();
	}

	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
	return 0;
}

/**
 * cancel_pending - cancel all pending works.
 * @ubi: UBI device description object
 */
static void cancel_pending(struct ubi_device *ubi)
{
	while (!list_empty(&ubi->works)) {
		struct ubi_work *wrk;

		wrk = list_entry(ubi->works.next, struct ubi_work, list);
		list_del(&wrk->list);
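		/*
		 * Run the work function with the cancel flag set so that it
		 * only releases its resources instead of doing the work.
		 */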
		wrk->func(ubi, wrk, 1);
		ubi->works_count -= 1;
		ubi_assert(ubi->works_count >= 0);
	}
}

/**
 * ubi_wl_init_scan - initialize the WL sub-system using scanning information.
 * @ubi: UBI device description object
 * @si: scanning information
 *
 * This function returns zero in case of success, and a negative error code in
 * case of failure.
 */
int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
{
	int err, i;
	struct rb_node *rb1, *rb2;
	struct ubi_scan_volume *sv;
	struct ubi_scan_leb *seb, *tmp;
	struct ubi_wl_entry *e;

	ubi->used = ubi->free = ubi->scrub = RB_ROOT;
	spin_lock_init(&ubi->wl_lock);
	mutex_init(&ubi->move_mutex);
	init_rwsem(&ubi->work_sem);
	ubi->max_ec = si->max_ec;
	INIT_LIST_HEAD(&ubi->works);

	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);

	err = -ENOMEM;
	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
	if (!ubi->lookuptbl)
		return err;

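	/*
	 * Initialize the protection queue. PEBs placed on this queue are
	 * temporarily protected from wear-leveling moves after they have
	 * been handed out by 'ubi_wl_get_peb()'.
	 */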
	for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
		INIT_LIST_HEAD(&ubi->pq[i]);
	ubi->pq_head = 0;

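	/*
	 * PEBs which the scan decided must be erased - create WL entries for
	 * them and schedule erase work.
	 */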
	list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, 0)) {
			kmem_cache_free(ubi_wl_entry_slab, e);
			goto out_free;
		}
	}

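	/* PEBs which the scan found to be free go straight to the free tree */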
	list_for_each_entry(seb, &si->free, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi_assert(e->ec >= 0);
		wl_tree_add(e, &ubi->free);
		ubi->lookuptbl[e->pnum] = e;
	}

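	/* Corrupted PEBs cannot be used as they are - schedule their erasure */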
	list_for_each_entry(seb, &si->corr, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, 0)) {
			kmem_cache_free(ubi_wl_entry_slab, e);
			goto out_free;
		}
	}

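	/*
	 * PEBs referenced by volumes are in use. Put them into the used tree,
	 * or into the scrub tree if the scan detected bit-flips in them.
	 */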
	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
			cond_resched();

			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
			if (!e)
				goto out_free;

			e->pnum = seb->pnum;
			e->ec = seb->ec;
			ubi->lookuptbl[e->pnum] = e;
			if (!seb->scrub) {
				dbg_wl("add PEB %d EC %d to the used tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->used);
			} else {
				dbg_wl("add PEB %d EC %d to the scrub tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->scrub);
			}
		}
	}

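	/*
	 * Reserve WL_RESERVED_PEBS physical eraseblocks so that wear-leveling
	 * always has free eraseblocks to work with.
	 */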
	if (ubi->avail_pebs < WL_RESERVED_PEBS) {
		ubi_err("not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, WL_RESERVED_PEBS);
		err = -ENOSPC;
		goto out_free;
	}
	ubi->avail_pebs -= WL_RESERVED_PEBS;
	ubi->rsvd_pebs += WL_RESERVED_PEBS;

	/* Schedule wear-leveling if needed */
	err = ensure_wear_leveling(ubi);
	if (err)
		goto out_free;

	return 0;

out_free:
	cancel_pending(ubi);
	tree_destroy(&ubi->used);
	tree_destroy(&ubi->free);
	tree_destroy(&ubi->scrub);
	kfree(ubi->lookuptbl);
	return err;
}

/**
 * protection_queue_destroy - destroy the protection queue.
 * @ubi: UBI device description object
 */
static void protection_queue_destroy(struct ubi_device *ubi)
{
	int i;
	struct ubi_wl_entry *e, *tmp;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
		list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
			list_del(&e->u.list);
			kmem_cache_free(ubi_wl_entry_slab, e);
		}
	}
}

/**
 * ubi_wl_close - close the wear-leveling sub-system.
 * @ubi: UBI device description object
 */
void ubi_wl_close(struct ubi_device *ubi)
{
	dbg_wl("close the WL sub-system");
	cancel_pending(ubi);
	protection_queue_destroy(ubi);
	tree_destroy(&ubi->used);
	tree_destroy(&ubi->free);
	tree_destroy(&ubi->scrub);
	kfree(ubi->lookuptbl);
}
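
/*
 * Illustrative sketch of the call order around the functions above; it is not
 * part of the driver, the helper name is hypothetical, and the surrounding
 * attach/detach code is only assumed here, so the block is kept compiled out.
 * The WL sub-system is set up from scanning data, flushed while the device is
 * still alive, and closed after the background thread has been stopped.
 */
#if 0
static int example_wl_lifecycle(struct ubi_device *ubi,
				struct ubi_scan_info *si)
{
	int err;

	err = ubi_wl_init_scan(ubi, si);	/* build trees, schedule erases */
	if (err)
		return err;

	/* ... normal operation: ubi_wl_get_peb()/ubi_wl_put_peb() ... */

	err = ubi_wl_flush(ubi);		/* drain pending erase/move work */
	if (ubi->bgt_thread)
		kthread_stop(ubi->bgt_thread);	/* stop the background thread */
	ubi_wl_close(ubi);			/* free trees and protection queue */
	return err;
}
#endif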

#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID

/**
 * paranoid_check_ec - make sure that the erase counter of a PEB is correct.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @ec: the erase counter to check
 *
 * This function returns zero if the erase counter of physical eraseblock @pnum
 * is equivalent to @ec, %1 if not, and a negative error code if an error
 * occurred.
 */
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
	int err;
	long long read_ec;
	struct ubi_ec_hdr *ec_hdr;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		/* The header does not have to exist */
		err = 0;
		goto out_free;
	}

	read_ec = be64_to_cpu(ec_hdr->ec);
	if (ec != read_ec) {
		ubi_err("paranoid check failed for PEB %d", pnum);
		ubi_err("read EC is %lld, should be %d", read_ec, ec);
		ubi_dbg_dump_stack();
		err = 1;
	} else
		err = 0;

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * paranoid_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns zero if @e is in the @root RB-tree and %1 if it is
 * not.
 */
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
				     struct rb_root *root)
{
	if (in_wl_tree(e, root))
		return 0;

	ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p",
		e->pnum, e->ec, root);
	ubi_dbg_dump_stack();
	return 1;
}

/**
 * paranoid_check_in_pq - check if wear-leveling entry is in the protection
 * queue.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 *
 * This function returns zero if @e is in @ubi->pq and %1 if it is not.
 */
static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	struct ubi_wl_entry *p;
	int i;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
		list_for_each_entry(p, &ubi->pq[i], u.list)
			if (p == e)
				return 0;

	ubi_err("paranoid check failed for PEB %d, EC %d, protection queue",
		e->pnum, e->ec);
	ubi_dbg_dump_stack();
	return 1;
}
#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */