// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
 */

/*
 * UBI wear-leveling sub-system.
 *
 * This sub-system is responsible for wear-leveling. It works in terms of
 * physical eraseblocks and erase counters and knows nothing about logical
 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
 * eraseblocks are of two types - used and free. Used physical eraseblocks are
 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
 * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
 *
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
 * counter header. The rest of the physical eraseblock contains only %0xFF
 * bytes.
 *
 * When physical eraseblocks are returned to the WL sub-system by means of the
 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
 * done asynchronously in the context of the per-UBI device background thread,
 * which is also managed by the WL sub-system.
 *
 * The wear-leveling is ensured by means of moving the contents of used
 * physical eraseblocks with low erase counter to free physical eraseblocks
 * with high erase counter.
 *
 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
 * bad.
 *
 * This sub-system is also responsible for scrubbing. If a bit-flip is detected
 * in a physical eraseblock, it has to be moved. Technically this is the same
 * as moving it for wear-leveling reasons.
 *
 * As was said, for the UBI sub-system all physical eraseblocks are either
 * "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree, while
 * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
 * RB-trees, as well as (temporarily) in the @wl->pq queue.
 *
 * When the WL sub-system returns a physical eraseblock, the physical
 * eraseblock is protected from being moved for some "time". For this reason,
 * the physical eraseblock is not directly moved from the @wl->free tree to the
 * @wl->used tree. There is a protection queue in between where this
 * physical eraseblock is temporarily stored (@wl->pq).
 *
 * All this protection stuff is needed because:
 *  o we don't want to move physical eraseblocks just after we have given them
 *    to the user; instead, we first want to let users fill them up with data;
 *
 *  o there is a chance that the user will put the physical eraseblock very
 *    soon, so it makes sense not to move it for some time, but wait.
 *
 * Physical eraseblocks stay protected only for a limited time. But the "time"
 * is measured in erase cycles in this case. This is implemented with the help
 * of the protection queue. Eraseblocks are put to the tail of this queue when
 * they are returned by 'ubi_wl_get_peb()', and eraseblocks are removed from
 * the head of the queue on each erase operation (for any eraseblock). So the
 * length of the queue defines how many (global) erase cycles PEBs are
 * protected.
 *
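 * For example, with the protection queue length %UBI_PROT_QUEUE_LEN (10 at
 * the time of writing), a PEB handed out by 'ubi_wl_get_peb()' stays
 * protected for roughly the next 10 erase operations performed anywhere on
 * the device, not only on that PEB.
 *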
 * To put it differently, each physical eraseblock has 2 main states: free and
 * used. The former state corresponds to the @wl->free tree. The latter state
 * is split up into several sub-states:
 * o the WL movement is allowed (@wl->used tree);
 * o the WL movement is disallowed (@wl->erroneous) because the PEB is
 *   erroneous - e.g., there was a read error;
 * o the WL movement is temporarily prohibited (@wl->pq queue);
 * o scrubbing is needed (@wl->scrub tree).
 *
 * Depending on the sub-state, wear-leveling entries of the used physical
 * eraseblocks may be kept in one of those structures.
 *
 * Note, in this implementation, we keep a small in-RAM object for each physical
 * eraseblock. This is surely not a scalable solution. But it appears to be good
 * enough for moderately large flashes and it is simple. In the future, one may
 * re-work this sub-system and make it more scalable.
 *
 * At the moment this sub-system does not utilize the sequence number, which
 * was introduced relatively recently. But it would be wise to do this because
 * the sequence number of a logical eraseblock characterizes how old it is. For
 * example, when we move a PEB with a low erase counter, and we need to pick
 * the target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
 * pick a target PEB with an average EC if our PEB is not very "old". This is
 * room for future re-work of the WL sub-system.
 */
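
/*
 * A rough sketch of the PEB life cycle described above (an informal summary
 * of this file, not an authoritative state diagram):
 *
 *   free --ubi_wl_get_peb()--> pq --UBI_PROT_QUEUE_LEN erasures--> used
 *   used/pq --ubi_wl_put_peb()--> erase worker --> free
 *   used/pq --bit-flips detected--> scrub --WL worker moves data, erases--> free
 */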

#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include "ubi.h"
#include "wl.h"

/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL sub-system starts moving data from used physical
 * eraseblocks with low erase counter to free physical eraseblocks with high
 * erase counter.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is moved, the WL sub-system has to pick the target
 * physical eraseblock to move to. The simplest way would be just to pick the
 * one with the highest erase counter. But in certain workloads this could lead
 * to unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
 * situation when the picked physical eraseblock is constantly erased after the
 * data is written to it. So, we have a constant which limits the highest erase
 * counter of the free physical eraseblock to pick. Namely, the WL sub-system
 * does not pick eraseblocks with an erase counter greater than the lowest
 * erase counter plus %WL_FREE_MAX_DIFF.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
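
/*
 * Worked example: with the Kconfig default of 4096 for
 * CONFIG_MTD_UBI_WL_THRESHOLD, WL_FREE_MAX_DIFF is 8192, so if the least
 * worn free PEB has an erase counter of 1000, free PEBs with erase counters
 * of 9192 or more are never picked as wear-leveling targets.
 */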

/*
 * Maximum number of consecutive background thread failures which is enough to
 * switch to read-only mode.
 */
#define WL_MAX_FAILURES 32

static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int self_check_in_wl_tree(const struct ubi_device *ubi,
                                 struct ubi_wl_entry *e, struct rb_root *root);
static int self_check_in_pq(const struct ubi_device *ubi,
                            struct ubi_wl_entry *e);

/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
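 *
 * For example, an entry with (EC 10, PEB 3) sorts before an entry with
 * (EC 10, PEB 7): the erase counter is compared first and the physical
 * eraseblock number breaks ties.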
 */
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
        struct rb_node **p, *parent = NULL;

        p = &root->rb_node;
        while (*p) {
                struct ubi_wl_entry *e1;

                parent = *p;
                e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);

                if (e->ec < e1->ec)
                        p = &(*p)->rb_left;
                else if (e->ec > e1->ec)
                        p = &(*p)->rb_right;
                else {
                        ubi_assert(e->pnum != e1->pnum);
                        if (e->pnum < e1->pnum)
                                p = &(*p)->rb_left;
                        else
                                p = &(*p)->rb_right;
                }
        }

        rb_link_node(&e->u.rb, parent, p);
        rb_insert_color(&e->u.rb, root);
}

/**
 * wl_entry_destroy - destroy a wear-leveling entry.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to destroy
 *
 * This function destroys a wear leveling entry and removes
 * the reference from the lookup table.
 */
static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
        ubi->lookuptbl[e->pnum] = NULL;
        kmem_cache_free(ubi_wl_entry_slab, e);
}

/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_work(struct ubi_device *ubi)
{
        int err;
        struct ubi_work *wrk;

        cond_resched();

        /*
         * @ubi->work_sem is used to synchronize with the workers. Workers take
         * it in read mode, so many of them may be doing work at a time. But
         * the queue flush code has to be sure the whole queue of works is
         * done, and it takes the semaphore in write mode.
         */
        down_read(&ubi->work_sem);
        spin_lock(&ubi->wl_lock);
        if (list_empty(&ubi->works)) {
                spin_unlock(&ubi->wl_lock);
                up_read(&ubi->work_sem);
                return 0;
        }

        wrk = list_entry(ubi->works.next, struct ubi_work, list);
        list_del(&wrk->list);
        ubi->works_count -= 1;
        ubi_assert(ubi->works_count >= 0);
        spin_unlock(&ubi->wl_lock);

        /*
         * Call the worker function. Do not touch the work structure
         * after this call as it will have been freed or reused by that
         * time by the worker function.
         */
        err = wrk->func(ubi, wrk, 0);
        if (err)
                ubi_err(ubi, "work failed with error code %d", err);
        up_read(&ubi->work_sem);

        return err;
}

/**
 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
        struct rb_node *p;

        p = root->rb_node;
        while (p) {
                struct ubi_wl_entry *e1;

                e1 = rb_entry(p, struct ubi_wl_entry, u.rb);

                if (e->pnum == e1->pnum) {
                        ubi_assert(e == e1);
                        return 1;
                }

                if (e->ec < e1->ec)
                        p = p->rb_left;
                else if (e->ec > e1->ec)
                        p = p->rb_right;
                else {
                        ubi_assert(e->pnum != e1->pnum);
                        if (e->pnum < e1->pnum)
                                p = p->rb_left;
                        else
                                p = p->rb_right;
                }
        }

        return 0;
}

/**
 * in_pq - check if a wear-leveling entry is present in the protection queue.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 *
 * This function returns non-zero if @e is in the protection queue and zero
 * if it is not.
 */
static inline int in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e)
{
        struct ubi_wl_entry *p;
        int i;

        for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
                list_for_each_entry(p, &ubi->pq[i], u.list)
                        if (p == e)
                                return 1;

        return 0;
}

/**
 * prot_queue_add - add physical eraseblock to the protection queue.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 *
 * This function adds @e to the tail of the protection queue @ubi->pq, where
 * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
 * temporarily protected from the wear-leveling worker. Note, @ubi->wl_lock
 * has to be locked.
 */
static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
        int pq_tail = ubi->pq_head - 1;

        if (pq_tail < 0)
                pq_tail = UBI_PROT_QUEUE_LEN - 1;
        ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
        list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
        dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
}
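
/*
 * For example, with %UBI_PROT_QUEUE_LEN == 10 and @ubi->pq_head == 0, the
 * tail index above wraps around to 9, i.e. the entry is appended to the list
 * that 'serve_prot_queue()' will drain last.
 */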

/**
 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 * @diff: maximum possible difference from the smallest erase counter
 *
 * This function looks for a wear leveling entry with erase counter closest to
 * min + @diff, where min is the smallest erase counter.
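 *
 * For example, if the smallest erase counter in @root is 100 and @diff is 50,
 * the entry returned is the one with the largest erase counter below 150.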
 */
static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
                                          struct rb_root *root, int diff)
{
        struct rb_node *p;
        struct ubi_wl_entry *e;
        int max;

        e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
        max = e->ec + diff;

        p = root->rb_node;
        while (p) {
                struct ubi_wl_entry *e1;

                e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
                if (e1->ec >= max)
                        p = p->rb_left;
                else {
                        p = p->rb_right;
                        e = e1;
                }
        }

        return e;
}

/**
 * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 *
 * This function looks for a wear leveling entry with a medium erase counter,
 * but less than the lowest erase counter plus %WL_FREE_MAX_DIFF/2.
 */
static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
                                               struct rb_root *root)
{
        struct ubi_wl_entry *e, *first, *last;

        first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
        last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);

        if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
                e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);

                /*
                 * If no fastmap has been written and this WL entry can be used
                 * as anchor PEB, hold it back and return the second best
                 * WL entry such that fastmap can use the anchor PEB later.
                 */
                e = may_reserve_for_fm(ubi, e, root);
        } else
                e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);

        return e;
}

/**
 * wl_get_wle - get a mean wl entry to be used by ubi_wl_get_peb() or
 * refill_wl_user_pool().
 * @ubi: UBI device description object
 *
 * This function returns a wear leveling entry in case of success and
 * NULL in case of failure.
 */
static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
{
        struct ubi_wl_entry *e;

        e = find_mean_wl_entry(ubi, &ubi->free);
        if (!e) {
                ubi_err(ubi, "no free eraseblocks");
                return NULL;
        }

        self_check_in_wl_tree(ubi, e, &ubi->free);

        /*
         * Move the physical eraseblock to the protection queue where it will
         * be protected from being moved for some time.
         */
        rb_erase(&e->u.rb, &ubi->free);
        ubi->free_count--;
        dbg_wl("PEB %d EC %d", e->pnum, e->ec);

        return e;
}

/**
 * prot_queue_del - remove a physical eraseblock from the protection queue.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function deletes PEB @pnum from the protection queue and returns zero
 * in case of success and %-ENODEV if the PEB was not found.
 */
static int prot_queue_del(struct ubi_device *ubi, int pnum)
{
        struct ubi_wl_entry *e;

        e = ubi->lookuptbl[pnum];
        if (!e)
                return -ENODEV;

        if (self_check_in_pq(ubi, e))
                return -ENODEV;

        list_del(&e->u.list);
        dbg_wl("deleted PEB %d from the protection queue", e->pnum);
        return 0;
}

/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
                      int torture)
{
        int err;
        struct ubi_ec_hdr *ec_hdr;
        unsigned long long ec = e->ec;

        dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

        err = self_check_ec(ubi, e->pnum, e->ec);
        if (err)
                return -EINVAL;

        ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
        if (!ec_hdr)
                return -ENOMEM;

        err = ubi_io_sync_erase(ubi, e->pnum, torture);
        if (err < 0)
                goto out_free;

        ec += err;
        if (ec > UBI_MAX_ERASECOUNTER) {
                /*
                 * Erase counter overflow. Upgrade UBI and use 64-bit
                 * erase counters internally.
                 */
                ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu",
                        e->pnum, ec);
                err = -EINVAL;
                goto out_free;
        }

        dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

        ec_hdr->ec = cpu_to_be64(ec);

        err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
        if (err)
                goto out_free;

        e->ec = ec;
        spin_lock(&ubi->wl_lock);
        if (e->ec > ubi->max_ec)
                ubi->max_ec = e->ec;
        spin_unlock(&ubi->wl_lock);

out_free:
        kfree(ec_hdr);
        return err;
}

/**
 * serve_prot_queue - check if it is time to stop protecting PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation and removes PEBs from the
 * tail of the protection queue. These PEBs have been protected for long enough
 * and should be moved to the used tree.
 */
static void serve_prot_queue(struct ubi_device *ubi)
{
        struct ubi_wl_entry *e, *tmp;
        int count;

        /*
         * There may be several protected physical eraseblocks to remove,
         * process them all.
         */
repeat:
        count = 0;
        spin_lock(&ubi->wl_lock);
        list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
                dbg_wl("PEB %d EC %d protection over, move to used tree",
                        e->pnum, e->ec);

                list_del(&e->u.list);
                wl_tree_add(e, &ubi->used);
                if (count++ > 32) {
                        /*
                         * Let's be nice and avoid holding the spinlock for
                         * too long.
                         */
                        spin_unlock(&ubi->wl_lock);
                        cond_resched();
                        goto repeat;
                }
        }

        ubi->pq_head += 1;
        if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
                ubi->pq_head = 0;
        ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
        spin_unlock(&ubi->wl_lock);
}

/**
 * __schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list. Can only be used if ubi->work_sem is already held in read mode!
 */
static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
        spin_lock(&ubi->wl_lock);
        list_add_tail(&wrk->list, &ubi->works);
        ubi_assert(ubi->works_count >= 0);
        ubi->works_count += 1;
        if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
                wake_up_process(ubi->bgt_thread);
        spin_unlock(&ubi->wl_lock);
}

/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list.
 */
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
        down_read(&ubi->work_sem);
        __schedule_ubi_work(ubi, wrk);
        up_read(&ubi->work_sem);
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
                        int shutdown);

/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 * @nested: set to non-zero if this work is being scheduled from within a
 * UBI worker, in which case ubi->work_sem is already held in read mode
 *
 * This function returns zero in case of success and %-ENOMEM in case of
 * failure.
 */
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
                          int vol_id, int lnum, int torture, bool nested)
{
        struct ubi_work *wl_wrk;

        ubi_assert(e);

        dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
               e->pnum, e->ec, torture);

        wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
        if (!wl_wrk)
                return -ENOMEM;

        wl_wrk->func = &erase_worker;
        wl_wrk->e = e;
        wl_wrk->vol_id = vol_id;
        wl_wrk->lnum = lnum;
        wl_wrk->torture = torture;

        if (nested)
                __schedule_ubi_work(ubi, wl_wrk);
        else
                schedule_ubi_work(ubi, wl_wrk);
        return 0;
}

static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk);

/**
 * do_sync_erase - run the erase worker synchronously.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
                         int vol_id, int lnum, int torture)
{
        struct ubi_work wl_wrk;

        dbg_wl("sync erase of PEB %i", e->pnum);

        wl_wrk.e = e;
        wl_wrk.vol_id = vol_id;
        wl_wrk.lnum = lnum;
        wl_wrk.torture = torture;

        return __erase_worker(ubi, &wl_wrk);
}

static int ensure_wear_leveling(struct ubi_device *ubi, int nested);

/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @shutdown: non-zero if the worker has to free memory and exit
 * because the WL-subsystem is shutting down
 *
 * This function copies a more worn out physical eraseblock to a less worn out
 * one. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
                                int shutdown)
{
        int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
        int erase = 0, keep = 0, vol_id = -1, lnum = -1;
        struct ubi_wl_entry *e1, *e2;
        struct ubi_vid_io_buf *vidb;
        struct ubi_vid_hdr *vid_hdr;
        int dst_leb_clean = 0;

        kfree(wrk);
        if (shutdown)
                return 0;

        vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
        if (!vidb)
                return -ENOMEM;

        vid_hdr = ubi_get_vid_hdr(vidb);

        down_read(&ubi->fm_eba_sem);
        mutex_lock(&ubi->move_mutex);
        spin_lock(&ubi->wl_lock);
        ubi_assert(!ubi->move_from && !ubi->move_to);
        ubi_assert(!ubi->move_to_put);

        if (!ubi->free.rb_node ||
            (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
                /*
                 * No free physical eraseblocks? Well, they must be waiting in
                 * the queue to be erased. Cancel movement - it will be
                 * triggered again when a free physical eraseblock appears.
                 *
                 * No used physical eraseblocks? They must be temporarily
                 * protected from being moved. They will be moved to the
                 * @ubi->used tree later and the wear-leveling will be
                 * triggered again.
                 */
                dbg_wl("cancel WL, a list is empty: free %d, used %d",
                       !ubi->free.rb_node, !ubi->used.rb_node);
                goto out_cancel;
        }

#ifdef CONFIG_MTD_UBI_FASTMAP
        if (ubi->fm_do_produce_anchor) {
                e1 = find_anchor_wl_entry(&ubi->used);
                if (!e1)
                        goto out_cancel;
                e2 = get_peb_for_wl(ubi);
                if (!e2)
                        goto out_cancel;

                /*
                 * Anchor move within the anchor area is useless.
                 */
                if (e2->pnum < UBI_FM_MAX_START)
                        goto out_cancel;

                self_check_in_wl_tree(ubi, e1, &ubi->used);
                rb_erase(&e1->u.rb, &ubi->used);
                dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
                ubi->fm_do_produce_anchor = 0;
        } else if (!ubi->scrub.rb_node) {
#else
        if (!ubi->scrub.rb_node) {
#endif
                /*
                 * Now pick the least worn-out used physical eraseblock and a
                 * highly worn-out free physical eraseblock. If the erase
                 * counters differ much enough, start wear-leveling.
                 */
                e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
                e2 = get_peb_for_wl(ubi);
                if (!e2)
                        goto out_cancel;

                if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
                        dbg_wl("no WL needed: min used EC %d, max free EC %d",
                               e1->ec, e2->ec);

                        /* Give the unused PEB back */
                        wl_tree_add(e2, &ubi->free);
                        ubi->free_count++;
                        goto out_cancel;
                }
                self_check_in_wl_tree(ubi, e1, &ubi->used);
                rb_erase(&e1->u.rb, &ubi->used);
                dbg_wl("move PEB %d EC %d to PEB %d EC %d",
                       e1->pnum, e1->ec, e2->pnum, e2->ec);
        } else {
                /* Perform scrubbing */
                scrubbing = 1;
                e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
                e2 = get_peb_for_wl(ubi);
                if (!e2)
                        goto out_cancel;

                self_check_in_wl_tree(ubi, e1, &ubi->scrub);
                rb_erase(&e1->u.rb, &ubi->scrub);
                dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
        }

        ubi->move_from = e1;
        ubi->move_to = e2;
        spin_unlock(&ubi->wl_lock);

        /*
         * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
         * We so far do not know which logical eraseblock our physical
         * eraseblock (@e1) belongs to. We have to read the volume identifier
         * header first.
         *
         * Note, we are protected from this PEB being unmapped and erased. The
         * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
         * which is being moved was unmapped.
         */

        err = ubi_io_read_vid_hdr(ubi, e1->pnum, vidb, 0);
        if (err && err != UBI_IO_BITFLIPS) {
                dst_leb_clean = 1;
                if (err == UBI_IO_FF) {
                        /*
                         * We are trying to move a PEB without a VID header.
                         * UBI always writes VID headers shortly after giving
                         * out a PEB, so the user has probably not yet had a
                         * chance to write one, e.g. because it was preempted.
                         * So put this PEB on the protection queue for now,
                         * because presumably more data will be written there
                         * (including the missing VID header), and then we'll
                         * move it.
                         */
                        dbg_wl("PEB %d has no VID header", e1->pnum);
                        protect = 1;
                        goto out_not_moved;
                } else if (err == UBI_IO_FF_BITFLIPS) {
                        /*
                         * The same situation as %UBI_IO_FF, but bit-flips were
                         * detected. It is better to schedule this PEB for
                         * scrubbing.
                         */
                        dbg_wl("PEB %d has no VID header but has bit-flips",
                               e1->pnum);
                        scrubbing = 1;
                        goto out_not_moved;
                } else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) {
                        /*
                         * While a full scan would detect interrupted erasures
                         * at attach time, we can face them here when attaching
                         * from a fastmap.
                         */
                        dbg_wl("PEB %d has ECC errors, maybe from an interrupted erasure",
                               e1->pnum);
                        erase = 1;
                        goto out_not_moved;
                }

                ubi_err(ubi, "error %d while reading VID header from PEB %d",
                        err, e1->pnum);
                goto out_error;
        }

        vol_id = be32_to_cpu(vid_hdr->vol_id);
        lnum = be32_to_cpu(vid_hdr->lnum);

        err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vidb);
        if (err) {
                if (err == MOVE_CANCEL_RACE) {
                        /*
                         * The LEB has not been moved because the volume is
                         * being deleted or the PEB has been put meanwhile. We
                         * should prevent this PEB from being selected for
                         * wear-leveling movement again, so put it to the
                         * protection queue.
                         */
                        protect = 1;
                        dst_leb_clean = 1;
                        goto out_not_moved;
                }
                if (err == MOVE_RETRY) {
                        scrubbing = 1;
                        dst_leb_clean = 1;
                        goto out_not_moved;
                }
                if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
                    err == MOVE_TARGET_RD_ERR) {
                        /*
                         * Target PEB had bit-flips or write error - torture it.
                         */
                        torture = 1;
                        keep = 1;
                        goto out_not_moved;
                }

                if (err == MOVE_SOURCE_RD_ERR) {
                        /*
                         * An error happened while reading the source PEB. Do
                         * not switch to R/O mode in this case, and give the
                         * upper layers a possibility to recover from this,
                         * e.g. by unmapping the corresponding LEB. Instead,
                         * just put this PEB to the @ubi->erroneous list to
                         * prevent UBI from trying to move it over and over
                         * again.
                         */
                        if (ubi->erroneous_peb_count > ubi->max_erroneous) {
                                ubi_err(ubi, "too many erroneous eraseblocks (%d)",
                                        ubi->erroneous_peb_count);
                                goto out_error;
                        }
                        dst_leb_clean = 1;
                        erroneous = 1;
                        goto out_not_moved;
                }

                if (err < 0)
                        goto out_error;

                ubi_assert(0);
        }

        /* The PEB has been successfully moved */
        if (scrubbing)
                ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
                        e1->pnum, vol_id, lnum, e2->pnum);
        ubi_free_vid_buf(vidb);

        spin_lock(&ubi->wl_lock);
        if (!ubi->move_to_put) {
                wl_tree_add(e2, &ubi->used);
                e2 = NULL;
        }
        ubi->move_from = ubi->move_to = NULL;
        ubi->move_to_put = ubi->wl_scheduled = 0;
        spin_unlock(&ubi->wl_lock);

        err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
        if (err) {
                if (e2)
                        wl_entry_destroy(ubi, e2);
                goto out_ro;
        }

        if (e2) {
                /*
                 * Well, the target PEB was put meanwhile, schedule it for
                 * erasure.
                 */
                dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
                       e2->pnum, vol_id, lnum);
                err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
                if (err)
                        goto out_ro;
        }

        dbg_wl("done");
        mutex_unlock(&ubi->move_mutex);
        up_read(&ubi->fm_eba_sem);
        return 0;

        /*
         * For some reason the LEB was not moved - it might be an error, it
         * might be something else. @e1 was not changed, so return it back.
         * @e2 might have been changed, schedule it for erasure.
         */
out_not_moved:
        if (vol_id != -1)
                dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
                       e1->pnum, vol_id, lnum, e2->pnum, err);
        else
                dbg_wl("cancel moving PEB %d to PEB %d (%d)",
                       e1->pnum, e2->pnum, err);
        spin_lock(&ubi->wl_lock);
        if (protect)
                prot_queue_add(ubi, e1);
        else if (erroneous) {
                wl_tree_add(e1, &ubi->erroneous);
                ubi->erroneous_peb_count += 1;
        } else if (scrubbing)
                wl_tree_add(e1, &ubi->scrub);
        else if (keep)
                wl_tree_add(e1, &ubi->used);
        if (dst_leb_clean) {
                wl_tree_add(e2, &ubi->free);
                ubi->free_count++;
        }

        ubi_assert(!ubi->move_to_put);
        ubi->move_from = ubi->move_to = NULL;
        ubi->wl_scheduled = 0;
        spin_unlock(&ubi->wl_lock);

        ubi_free_vid_buf(vidb);
        if (dst_leb_clean) {
                ensure_wear_leveling(ubi, 1);
        } else {
                err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
                if (err)
                        goto out_ro;
        }

        if (erase) {
                err = do_sync_erase(ubi, e1, vol_id, lnum, 1);
                if (err)
                        goto out_ro;
        }

        mutex_unlock(&ubi->move_mutex);
        up_read(&ubi->fm_eba_sem);
        return 0;

out_error:
        if (vol_id != -1)
                ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
                        err, e1->pnum, vol_id, lnum, e2->pnum);
        else
                ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
                        err, e1->pnum, e2->pnum);
        spin_lock(&ubi->wl_lock);
        ubi->move_from = ubi->move_to = NULL;
        ubi->move_to_put = ubi->wl_scheduled = 0;
        spin_unlock(&ubi->wl_lock);

        ubi_free_vid_buf(vidb);
        wl_entry_destroy(ubi, e1);
        wl_entry_destroy(ubi, e2);

out_ro:
        ubi_ro_mode(ubi);
        mutex_unlock(&ubi->move_mutex);
        up_read(&ubi->fm_eba_sem);
        ubi_assert(err != 0);
        return err < 0 ? err : -EIO;

out_cancel:
        ubi->wl_scheduled = 0;
        spin_unlock(&ubi->wl_lock);
        mutex_unlock(&ubi->move_mutex);
        up_read(&ubi->fm_eba_sem);
        ubi_free_vid_buf(vidb);
        return 0;
}

/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 * @nested: set to non-zero if this function is called from a UBI worker
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
{
        int err = 0;
        struct ubi_wl_entry *e1;
        struct ubi_wl_entry *e2;
        struct ubi_work *wrk;

        spin_lock(&ubi->wl_lock);
        if (ubi->wl_scheduled)
                /* Wear-leveling is already in the work queue */
                goto out_unlock;

        /*
         * If the ubi->scrub tree is not empty, scrubbing is needed, and the
         * WL worker has to be scheduled anyway.
         */
        if (!ubi->scrub.rb_node) {
                if (!ubi->used.rb_node || !ubi->free.rb_node)
                        /* No physical eraseblocks - no deal */
                        goto out_unlock;

                /*
                 * We schedule wear-leveling only if the difference between the
                 * lowest erase counter of used physical eraseblocks and a high
                 * erase counter of free physical eraseblocks is greater than
                 * %UBI_WL_THRESHOLD.
                 */
                e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
                e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);

                if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
                        goto out_unlock;
                dbg_wl("schedule wear-leveling");
        } else
                dbg_wl("schedule scrubbing");

        ubi->wl_scheduled = 1;
        spin_unlock(&ubi->wl_lock);

        wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
        if (!wrk) {
                err = -ENOMEM;
                goto out_cancel;
        }

        wrk->func = &wear_leveling_worker;
        if (nested)
                __schedule_ubi_work(ubi, wrk);
        else
                schedule_ubi_work(ubi, wrk);
        return err;

out_cancel:
        spin_lock(&ubi->wl_lock);
        ubi->wl_scheduled = 0;
out_unlock:
        spin_unlock(&ubi->wl_lock);
        return err;
}

/**
 * __erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 *
 * This function erases a physical eraseblock and performs torture testing if
 * needed. It also takes care of marking the physical eraseblock bad if
 * needed. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
{
        struct ubi_wl_entry *e = wl_wrk->e;
        int pnum = e->pnum;
        int vol_id = wl_wrk->vol_id;
        int lnum = wl_wrk->lnum;
        int err, available_consumed = 0;

        dbg_wl("erase PEB %d EC %d LEB %d:%d",
               pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);

        err = sync_erase(ubi, e, wl_wrk->torture);
        if (!err) {
                spin_lock(&ubi->wl_lock);

                if (!ubi->fm_anchor && e->pnum < UBI_FM_MAX_START) {
                        ubi->fm_anchor = e;
                        ubi->fm_do_produce_anchor = 0;
                } else {
                        wl_tree_add(e, &ubi->free);
                        ubi->free_count++;
                }

                spin_unlock(&ubi->wl_lock);

                /*
                 * One more erase operation has happened, take care of the
                 * protected physical eraseblocks.
                 */
                serve_prot_queue(ubi);

                /* And take care of wear-leveling */
                err = ensure_wear_leveling(ubi, 1);
                return err;
        }

        ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);

        if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
            err == -EBUSY) {
                int err1;

                /* Re-schedule the PEB for erasure */
                err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
                if (err1) {
                        wl_entry_destroy(ubi, e);
                        err = err1;
                        goto out_ro;
                }
                return err;
        }

        wl_entry_destroy(ubi, e);
        if (err != -EIO)
                /*
                 * If this is not %-EIO, we have no idea what to do. Scheduling
                 * this physical eraseblock for erasure again would cause
                 * errors again and again. Well, let's switch to R/O mode.
                 */
                goto out_ro;

        /* It is %-EIO, the PEB went bad */

        if (!ubi->bad_allowed) {
                ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
                goto out_ro;
        }

        spin_lock(&ubi->volumes_lock);
        if (ubi->beb_rsvd_pebs == 0) {
                if (ubi->avail_pebs == 0) {
                        spin_unlock(&ubi->volumes_lock);
                        ubi_err(ubi, "no reserved/available physical eraseblocks");
                        goto out_ro;
                }
                ubi->avail_pebs -= 1;
                available_consumed = 1;
        }
        spin_unlock(&ubi->volumes_lock);

        ubi_msg(ubi, "mark PEB %d as bad", pnum);
        err = ubi_io_mark_bad(ubi, pnum);
        if (err)
                goto out_ro;

        spin_lock(&ubi->volumes_lock);
        if (ubi->beb_rsvd_pebs > 0) {
                if (available_consumed) {
                        /*
                         * The amount of reserved PEBs increased since we last
                         * checked.
                         */
                        ubi->avail_pebs += 1;
                        available_consumed = 0;
                }
                ubi->beb_rsvd_pebs -= 1;
        }
        ubi->bad_peb_count += 1;
        ubi->good_peb_count -= 1;
        ubi_calculate_reserved(ubi);
        if (available_consumed)
                ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
        else if (ubi->beb_rsvd_pebs)
                ubi_msg(ubi, "%d PEBs left in the reserve",
                        ubi->beb_rsvd_pebs);
        else
                ubi_warn(ubi, "last PEB from the reserve was used");
        spin_unlock(&ubi->volumes_lock);

        return err;

out_ro:
        if (available_consumed) {
                spin_lock(&ubi->volumes_lock);
                ubi->avail_pebs += 1;
                spin_unlock(&ubi->volumes_lock);
        }
        ubi_ro_mode(ubi);
        return err;
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
                          int shutdown)
{
        int ret;

        if (shutdown) {
                struct ubi_wl_entry *e = wl_wrk->e;

                dbg_wl("cancel erasure of PEB %d EC %d", e->pnum, e->ec);
                kfree(wl_wrk);
                wl_entry_destroy(ubi, e);
                return 0;
        }

        ret = __erase_worker(ubi, wl_wrk);
        kfree(wl_wrk);
        return ret;
}

/**
 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
 * @ubi: UBI device description object
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @pnum: physical eraseblock to return
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function is called to return physical eraseblock @pnum to the pool of
 * free physical eraseblocks. The @torture flag has to be set if an I/O error
 * occurred to this @pnum and it has to be tested. This function returns zero
 * in case of success, and a negative error code in case of failure.
 */
int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
                   int pnum, int torture)
{
        int err;
        struct ubi_wl_entry *e;

        dbg_wl("PEB %d", pnum);
        ubi_assert(pnum >= 0);
        ubi_assert(pnum < ubi->peb_count);

        down_read(&ubi->fm_protect);

retry:
        spin_lock(&ubi->wl_lock);
        e = ubi->lookuptbl[pnum];
        if (e == ubi->move_from) {
                /*
                 * User is putting the physical eraseblock which was selected to
                 * be moved. It will be scheduled for erasure in the
                 * wear-leveling worker.
                 */
                dbg_wl("PEB %d is being moved, wait", pnum);
                spin_unlock(&ubi->wl_lock);

                /* Wait for the WL worker by taking the @ubi->move_mutex */
                mutex_lock(&ubi->move_mutex);
                mutex_unlock(&ubi->move_mutex);
                goto retry;
        } else if (e == ubi->move_to) {
                /*
                 * User is putting the physical eraseblock which was selected
                 * as the target the data is moved to. It may happen if the EBA
                 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
                 * but the WL sub-system has not put the PEB to the "used" tree
                 * yet, but it is about to do this. So we just set a flag which
                 * will tell the WL worker that the PEB is not needed anymore
                 * and should be scheduled for erasure.
                 */
                dbg_wl("PEB %d is the target of data moving", pnum);
                ubi_assert(!ubi->move_to_put);
                ubi->move_to_put = 1;
                spin_unlock(&ubi->wl_lock);
                up_read(&ubi->fm_protect);
                return 0;
        } else {
                if (in_wl_tree(e, &ubi->used)) {
                        self_check_in_wl_tree(ubi, e, &ubi->used);
                        rb_erase(&e->u.rb, &ubi->used);
                } else if (in_wl_tree(e, &ubi->scrub)) {
                        self_check_in_wl_tree(ubi, e, &ubi->scrub);
                        rb_erase(&e->u.rb, &ubi->scrub);
                } else if (in_wl_tree(e, &ubi->erroneous)) {
                        self_check_in_wl_tree(ubi, e, &ubi->erroneous);
                        rb_erase(&e->u.rb, &ubi->erroneous);
                        ubi->erroneous_peb_count -= 1;
                        ubi_assert(ubi->erroneous_peb_count >= 0);
                        /* Erroneous PEBs should be tortured */
                        torture = 1;
                } else {
                        err = prot_queue_del(ubi, e->pnum);
                        if (err) {
                                ubi_err(ubi, "PEB %d not found", pnum);
                                ubi_ro_mode(ubi);
                                spin_unlock(&ubi->wl_lock);
                                up_read(&ubi->fm_protect);
                                return err;
                        }
                }
        }
        spin_unlock(&ubi->wl_lock);

        err = schedule_erase(ubi, e, vol_id, lnum, torture, false);
        if (err) {
                spin_lock(&ubi->wl_lock);
                wl_tree_add(e, &ubi->used);
                spin_unlock(&ubi->wl_lock);
        }

        up_read(&ubi->fm_protect);
        return err;
}

/**
 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 *
 * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
 * needs scrubbing. This function schedules a physical eraseblock for
 * scrubbing which is done in background. This function returns zero in case of
 * success and a negative error code in case of failure.
 */
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
{
        struct ubi_wl_entry *e;

        ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);

retry:
        spin_lock(&ubi->wl_lock);
        e = ubi->lookuptbl[pnum];
        if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
                                   in_wl_tree(e, &ubi->erroneous)) {
                spin_unlock(&ubi->wl_lock);
                return 0;
        }

        if (e == ubi->move_to) {
                /*
                 * This physical eraseblock was used to move data to. The data
                 * was moved but the PEB was not yet inserted to the proper
                 * tree. We should just wait a little and let the WL worker
                 * proceed.
                 */
                spin_unlock(&ubi->wl_lock);
                dbg_wl("the PEB %d is not in proper tree, retry", pnum);
                yield();
                goto retry;
        }

        if (in_wl_tree(e, &ubi->used)) {
                self_check_in_wl_tree(ubi, e, &ubi->used);
                rb_erase(&e->u.rb, &ubi->used);
        } else {
                int err;

                err = prot_queue_del(ubi, e->pnum);
                if (err) {
                        ubi_err(ubi, "PEB %d not found", pnum);
                        ubi_ro_mode(ubi);
                        spin_unlock(&ubi->wl_lock);
                        return err;
                }
        }

        wl_tree_add(e, &ubi->scrub);
        spin_unlock(&ubi->wl_lock);

        /*
         * Technically scrubbing is the same as wear-leveling, so it is done
         * by the WL worker.
         */
        return ensure_wear_leveling(ubi, 0);
}

/**
 * ubi_wl_flush - flush all pending works.
 * @ubi: UBI device description object
 * @vol_id: the volume id to flush for
 * @lnum: the logical eraseblock number to flush for
 *
 * This function executes all pending works for a particular volume id /
 * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
 * acts as a wildcard for all of the corresponding volume numbers or logical
 * eraseblock numbers. It returns zero in case of success and a negative error
 * code in case of failure.
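 *
 * For example, 'ubi_wl_flush(ubi, UBI_ALL, UBI_ALL)' waits until every
 * pending work item, including all scheduled erasures, has been executed.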
1376  */
1377 int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
1378 {
1379         int err = 0;
1380         int found = 1;
1381
1382         /*
1383          * Erase while the pending works queue is not empty, but not more than
1384          * the number of currently pending works.
1385          */
1386         dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
1387                vol_id, lnum, ubi->works_count);
1388
1389         while (found) {
1390                 struct ubi_work *wrk, *tmp;
1391                 found = 0;
1392
1393                 down_read(&ubi->work_sem);
1394                 spin_lock(&ubi->wl_lock);
1395                 list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
1396                         if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
1397                             (lnum == UBI_ALL || wrk->lnum == lnum)) {
1398                                 list_del(&wrk->list);
1399                                 ubi->works_count -= 1;
1400                                 ubi_assert(ubi->works_count >= 0);
1401                                 spin_unlock(&ubi->wl_lock);
1402
1403                                 err = wrk->func(ubi, wrk, 0);
1404                                 if (err) {
1405                                         up_read(&ubi->work_sem);
1406                                         return err;
1407                                 }
1408
1409                                 spin_lock(&ubi->wl_lock);
1410                                 found = 1;
1411                                 break;
1412                         }
1413                 }
1414                 spin_unlock(&ubi->wl_lock);
1415                 up_read(&ubi->work_sem);
1416         }
1417
1418         /*
1419          * Make sure all the works which have been done in parallel are
1420          * finished.
1421          */
1422         down_write(&ubi->work_sem);
1423         up_write(&ubi->work_sem);
1424
1425         return err;
1426 }
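
/*
 * Illustrative usage (hypothetical caller, not part of this file): waiting
 * for all queued work that belongs to LEB 0 of volume 3 to finish could
 * look like this:
 *
 *	err = ubi_wl_flush(ubi, 3, 0);
 *	if (err)
 *		return err;
 *
 * Passing %UBI_ALL for @vol_id and @lnum acts as a wildcard and flushes
 * every pending work item.
 */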
1427
1428 static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e)
1429 {
1430         if (in_wl_tree(e, &ubi->scrub))
1431                 return false;
1432         else if (in_wl_tree(e, &ubi->erroneous))
1433                 return false;
1434         else if (ubi->move_from == e)
1435                 return false;
1436         else if (ubi->move_to == e)
1437                 return false;
1438
1439         return true;
1440 }
1441
1442 /**
1443  * ubi_bitflip_check - Check an eraseblock for bitflips and scrub it if needed.
1444  * @ubi: UBI device description object
1445  * @pnum: the physical eraseblock to schedule
1446  * @force: dont't read the block, assume bitflips happened and take action.
1447  *
1448  * This function reads the given eraseblock and checks if bitflips occured.
1449  * In case of bitflips, the eraseblock is scheduled for scrubbing.
1450  * If scrubbing is forced with @force, the eraseblock is not read,
1451  * but scheduled for scrubbing right away.
1452  *
1453  * Returns:
1454  * %EINVAL, PEB is out of range
1455  * %ENOENT, PEB is no longer used by UBI
1456  * %EBUSY, PEB cannot be checked now or a check is currently running on it
1457  * %EAGAIN, bit flips happened but scrubbing is currently not possible
1458  * %EUCLEAN, bit flips happened and PEB is scheduled for scrubbing
1459  * %0, no bit flips detected
1460  */
1461 int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force)
1462 {
1463         int err = 0;
1464         struct ubi_wl_entry *e;
1465
1466         if (pnum < 0 || pnum >= ubi->peb_count) {
1467                 err = -EINVAL;
1468                 goto out;
1469         }
1470
1471         /*
1472          * Pause all parallel work, otherwise it can happen that the
1473          * erase worker frees a wl entry under us.
1474          */
1475         down_write(&ubi->work_sem);
1476
1477         /*
1478          * Make sure that the wl entry does not change state while
1479          * inspecting it.
1480          */
1481         spin_lock(&ubi->wl_lock);
1482         e = ubi->lookuptbl[pnum];
1483         if (!e) {
1484                 spin_unlock(&ubi->wl_lock);
1485                 err = -ENOENT;
1486                 goto out_resume;
1487         }
1488
1489         /*
1490          * Does it make sense to check this PEB?
1491          */
1492         if (!scrub_possible(ubi, e)) {
1493                 spin_unlock(&ubi->wl_lock);
1494                 err = -EBUSY;
1495                 goto out_resume;
1496         }
1497         spin_unlock(&ubi->wl_lock);
1498
1499         if (!force) {
1500                 mutex_lock(&ubi->buf_mutex);
1501                 err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
1502                 mutex_unlock(&ubi->buf_mutex);
1503         }
1504
1505         if (force || err == UBI_IO_BITFLIPS) {
1506                 /*
1507                  * Okay, bit flip happened, let's figure out what we can do.
1508                  */
1509                 spin_lock(&ubi->wl_lock);
1510
1511                 /*
1512                  * Recheck. We released wl_lock, UBI might have killed the
1513                  * wl entry under us.
1514                  */
1515                 e = ubi->lookuptbl[pnum];
1516                 if (!e) {
1517                         spin_unlock(&ubi->wl_lock);
1518                         err = -ENOENT;
1519                         goto out_resume;
1520                 }
1521
1522                 /*
1523                  * Need to re-check state
1524                  */
1525                 if (!scrub_possible(ubi, e)) {
1526                         spin_unlock(&ubi->wl_lock);
1527                         err = -EBUSY;
1528                         goto out_resume;
1529                 }
1530
1531                 if (in_pq(ubi, e)) {
1532                         prot_queue_del(ubi, e->pnum);
1533                         wl_tree_add(e, &ubi->scrub);
1534                         spin_unlock(&ubi->wl_lock);
1535
1536                         err = ensure_wear_leveling(ubi, 1);
1537                 } else if (in_wl_tree(e, &ubi->used)) {
1538                         rb_erase(&e->u.rb, &ubi->used);
1539                         wl_tree_add(e, &ubi->scrub);
1540                         spin_unlock(&ubi->wl_lock);
1541
1542                         err = ensure_wear_leveling(ubi, 1);
1543                 } else if (in_wl_tree(e, &ubi->free)) {
1544                         rb_erase(&e->u.rb, &ubi->free);
1545                         ubi->free_count--;
1546                         spin_unlock(&ubi->wl_lock);
1547
1548                         /*
1549                          * This PEB is empty we can schedule it for
1550                          * erasure right away. No wear leveling needed.
1551                          */
1552                         err = schedule_erase(ubi, e, UBI_UNKNOWN, UBI_UNKNOWN,
1553                                              force ? 0 : 1, true);
1554                 } else {
1555                         spin_unlock(&ubi->wl_lock);
1556                         err = -EAGAIN;
1557                 }
1558
1559                 if (!err && !force)
1560                         err = -EUCLEAN;
1561         } else {
1562                 err = 0;
1563         }
1564
1565 out_resume:
1566         up_write(&ubi->work_sem);
1567 out:
1568
1569         return err;
1570 }
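
/*
 * Illustrative caller (hypothetical, not part of this file): a user of this
 * check would dispatch on the documented return codes roughly like this:
 *
 *	err = ubi_bitflip_check(ubi, pnum, 0);
 *	if (err == -EUCLEAN)
 *		pr_info("bit flips found, scrubbing was scheduled\n");
 *	else if (err == -EAGAIN)
 *		pr_info("bit flips found, but retry the check later\n");
 *	else if (err)
 *		pr_err("PEB %d cannot be checked: %d\n", pnum, err);
 */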

/**
 * tree_destroy - destroy an RB-tree.
 * @ubi: UBI device description object
 * @root: the root of the tree to destroy
 */
static void tree_destroy(struct ubi_device *ubi, struct rb_root *root)
{
        struct rb_node *rb;
        struct ubi_wl_entry *e;

        rb = root->rb_node;
        while (rb) {
                if (rb->rb_left)
                        rb = rb->rb_left;
                else if (rb->rb_right)
                        rb = rb->rb_right;
                else {
                        e = rb_entry(rb, struct ubi_wl_entry, u.rb);

                        rb = rb_parent(rb);
                        if (rb) {
                                if (rb->rb_left == &e->u.rb)
                                        rb->rb_left = NULL;
                                else
                                        rb->rb_right = NULL;
                        }

                        wl_entry_destroy(ubi, e);
                }
        }
}
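
/*
 * Note on the loop above: tree_destroy() avoids recursion, since kernel
 * stacks are small, and instead does an iterative post-order walk. It
 * always descends to a leaf, frees it, and clears the parent's pointer to
 * it, so the parent itself eventually becomes a leaf and can be freed too.
 */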

/**
 * ubi_thread - UBI background thread.
 * @u: the UBI device description object pointer
 */
int ubi_thread(void *u)
{
        int failures = 0;
        struct ubi_device *ubi = u;

        ubi_msg(ubi, "background thread \"%s\" started, PID %d",
                ubi->bgt_name, task_pid_nr(current));

        set_freezable();
        for (;;) {
                int err;

                if (kthread_should_stop())
                        break;

                if (try_to_freeze())
                        continue;

                spin_lock(&ubi->wl_lock);
                if (list_empty(&ubi->works) || ubi->ro_mode ||
                    !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock(&ubi->wl_lock);
                        schedule();
                        continue;
                }
                spin_unlock(&ubi->wl_lock);

                err = do_work(ubi);
                if (err) {
                        ubi_err(ubi, "%s: work failed with error code %d",
                                ubi->bgt_name, err);
                        if (failures++ > WL_MAX_FAILURES) {
                                /*
                                 * Too many failures, disable the thread and
                                 * switch to read-only mode.
                                 */
                                ubi_msg(ubi, "%s: %d consecutive failures",
                                        ubi->bgt_name, WL_MAX_FAILURES);
                                ubi_ro_mode(ubi);
                                ubi->thread_enabled = 0;
                                continue;
                        }
                } else
                        failures = 0;

                cond_resched();
        }

        dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
        ubi->thread_enabled = 0;
        return 0;
}
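
/*
 * Lifecycle sketch (an assumption for illustration; the actual call sites
 * live in the UBI attach code): the thread would be created and started
 * along the lines of
 *
 *	ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name);
 *	wake_up_process(ubi->bgt_thread);
 *
 * and stopped with kthread_stop(), which makes kthread_should_stop() above
 * return true and terminates the loop.
 */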

/**
 * shutdown_work - shut down all pending works.
 * @ubi: UBI device description object
 */
static void shutdown_work(struct ubi_device *ubi)
{
        while (!list_empty(&ubi->works)) {
                struct ubi_work *wrk;

                wrk = list_entry(ubi->works.next, struct ubi_work, list);
                list_del(&wrk->list);
                wrk->func(ubi, wrk, 1);
                ubi->works_count -= 1;
                ubi_assert(ubi->works_count >= 0);
        }
}
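
/*
 * Note: the third argument passed to wrk->func() here is the "shutdown"
 * flag. Passing 1 asks the work function to release its resources instead
 * of actually performing the work, which is what makes this safe to call
 * on the teardown and error paths.
 */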

/**
 * erase_aeb - erase a PEB described by a UBI attach info PEB.
 * @ubi: UBI device description object
 * @aeb: UBI attach info PEB
 * @sync: If true, erase synchronously. Otherwise, schedule it for erasure.
 */
static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync)
{
        struct ubi_wl_entry *e;
        int err;

        e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        e->pnum = aeb->pnum;
        e->ec = aeb->ec;
        ubi->lookuptbl[e->pnum] = e;

        if (sync) {
                err = sync_erase(ubi, e, false);
                if (err)
                        goto out_free;

                wl_tree_add(e, &ubi->free);
                ubi->free_count++;
        } else {
                err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
                if (err)
                        goto out_free;
        }

        return 0;

out_free:
        wl_entry_destroy(ubi, e);

        return err;
}
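
/*
 * Both modes are exercised during attach (see ubi_wl_init() below): PEBs on
 * the @ai->erase list are scheduled for asynchronous erasure, while outdated
 * fastmap anchor PEBs are erased synchronously.
 */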

/**
 * ubi_wl_init - initialize the WL sub-system using attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns zero in case of success, and a negative error code in
 * case of failure.
 */
int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
        int err, i, reserved_pebs, found_pebs = 0;
        struct rb_node *rb1, *rb2;
        struct ubi_ainf_volume *av;
        struct ubi_ainf_peb *aeb, *tmp;
        struct ubi_wl_entry *e;

        ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
        spin_lock_init(&ubi->wl_lock);
        mutex_init(&ubi->move_mutex);
        init_rwsem(&ubi->work_sem);
        ubi->max_ec = ai->max_ec;
        INIT_LIST_HEAD(&ubi->works);

        sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);

        err = -ENOMEM;
        ubi->lookuptbl = kcalloc(ubi->peb_count, sizeof(void *), GFP_KERNEL);
        if (!ubi->lookuptbl)
                return err;

        for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
                INIT_LIST_HEAD(&ubi->pq[i]);
        ubi->pq_head = 0;

        ubi->free_count = 0;
        list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
                cond_resched();

                err = erase_aeb(ubi, aeb, false);
                if (err)
                        goto out_free;

                found_pebs++;
        }

        list_for_each_entry(aeb, &ai->free, u.list) {
                cond_resched();

                e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
                if (!e) {
                        err = -ENOMEM;
                        goto out_free;
                }

                e->pnum = aeb->pnum;
                e->ec = aeb->ec;
                ubi_assert(e->ec >= 0);

                wl_tree_add(e, &ubi->free);
                ubi->free_count++;

                ubi->lookuptbl[e->pnum] = e;

                found_pebs++;
        }

        ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
                ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
                        cond_resched();

                        e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
                        if (!e) {
                                err = -ENOMEM;
                                goto out_free;
                        }

                        e->pnum = aeb->pnum;
                        e->ec = aeb->ec;
                        ubi->lookuptbl[e->pnum] = e;

                        if (!aeb->scrub) {
                                dbg_wl("add PEB %d EC %d to the used tree",
                                       e->pnum, e->ec);
                                wl_tree_add(e, &ubi->used);
                        } else {
                                dbg_wl("add PEB %d EC %d to the scrub tree",
                                       e->pnum, e->ec);
                                wl_tree_add(e, &ubi->scrub);
                        }

                        found_pebs++;
                }
        }

        list_for_each_entry(aeb, &ai->fastmap, u.list) {
                cond_resched();

                e = ubi_find_fm_block(ubi, aeb->pnum);

                if (e) {
                        ubi_assert(!ubi->lookuptbl[e->pnum]);
                        ubi->lookuptbl[e->pnum] = e;
                } else {
                        bool sync = false;

                        /*
                         * Usually old Fastmap PEBs are scheduled for erasure
                         * and we don't have to care about them, but if we
                         * face a power cut before scheduling them we need to
                         * take care of them here.
                         */
                        if (ubi->lookuptbl[aeb->pnum])
                                continue;

                        /*
                         * The fastmap update code might not find a free PEB for
                         * writing the fastmap anchor to and then reuses the
                         * current fastmap anchor PEB. When this PEB gets erased
                         * and a power cut happens before it is written again we
                         * must make sure that the fastmap attach code doesn't
                         * find any outdated fastmap anchors, hence we erase the
                         * outdated fastmap anchor PEBs synchronously here.
                         */
                        if (aeb->vol_id == UBI_FM_SB_VOLUME_ID)
                                sync = true;

                        err = erase_aeb(ubi, aeb, sync);
                        if (err)
                                goto out_free;
                }

                found_pebs++;
        }

        dbg_wl("found %i PEBs", found_pebs);

        ubi_assert(ubi->good_peb_count == found_pebs);

        reserved_pebs = WL_RESERVED_PEBS;
        ubi_fastmap_init(ubi, &reserved_pebs);

        if (ubi->avail_pebs < reserved_pebs) {
                ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
                        ubi->avail_pebs, reserved_pebs);
                if (ubi->corr_peb_count)
                        ubi_err(ubi, "%d PEBs are corrupted and not used",
                                ubi->corr_peb_count);
                err = -ENOSPC;
                goto out_free;
        }
        ubi->avail_pebs -= reserved_pebs;
        ubi->rsvd_pebs += reserved_pebs;

        /* Schedule wear-leveling if needed */
        err = ensure_wear_leveling(ubi, 0);
        if (err)
                goto out_free;

#ifdef CONFIG_MTD_UBI_FASTMAP
        ubi_ensure_anchor_pebs(ubi);
#endif
        return 0;

out_free:
        shutdown_work(ubi);
        tree_destroy(ubi, &ubi->used);
        tree_destroy(ubi, &ubi->free);
        tree_destroy(ubi, &ubi->scrub);
        kfree(ubi->lookuptbl);
        return err;
}
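
/*
 * To summarize the function above: ubi_wl_init() populates the WL state
 * from four sources in the attach info. PEBs on @ai->erase are erased via
 * erase_aeb(), PEBs on @ai->free go to the free tree, PEBs referenced by
 * the per-volume RB-trees go to the used or scrub tree, and old fastmap
 * PEBs on @ai->fastmap are handled last. Only then are PEBs reserved for
 * wear-leveling and the first WL work scheduled.
 */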

/**
 * protection_queue_destroy - destroy the protection queue.
 * @ubi: UBI device description object
 */
static void protection_queue_destroy(struct ubi_device *ubi)
{
        int i;
        struct ubi_wl_entry *e, *tmp;

        for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
                list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
                        list_del(&e->u.list);
                        wl_entry_destroy(ubi, e);
                }
        }
}

/**
 * ubi_wl_close - close the wear-leveling sub-system.
 * @ubi: UBI device description object
 */
void ubi_wl_close(struct ubi_device *ubi)
{
        dbg_wl("close the WL sub-system");
        ubi_fastmap_close(ubi);
        shutdown_work(ubi);
        protection_queue_destroy(ubi);
        tree_destroy(ubi, &ubi->used);
        tree_destroy(ubi, &ubi->erroneous);
        tree_destroy(ubi, &ubi->free);
        tree_destroy(ubi, &ubi->scrub);
        kfree(ubi->lookuptbl);
}
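
/*
 * The teardown order above matters: fastmap state is closed first, pending
 * works are cancelled by shutdown_work(), and only then are the protection
 * queue and the RB-trees destroyed, so no queued work can touch a
 * wear-leveling entry after it has been freed.
 */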

/**
 * self_check_ec - make sure that the erase counter of a PEB is correct.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @ec: the erase counter to check
 *
 * This function returns zero if the erase counter of physical eraseblock
 * @pnum is equivalent to @ec, %1 if the self-check failed, and a negative
 * error code if an error occurred.
 */
static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
        int err;
        long long read_ec;
        struct ubi_ec_hdr *ec_hdr;

        if (!ubi_dbg_chk_gen(ubi))
                return 0;

        ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
        if (!ec_hdr)
                return -ENOMEM;

        err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
        if (err && err != UBI_IO_BITFLIPS) {
                /* The header does not have to exist */
                err = 0;
                goto out_free;
        }

        read_ec = be64_to_cpu(ec_hdr->ec);
        if (ec != read_ec && read_ec - ec > 1) {
                ubi_err(ubi, "self-check failed for PEB %d", pnum);
                ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
                dump_stack();
                err = 1;
        } else
                err = 0;

out_free:
        kfree(ec_hdr);
        return err;
}
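
/*
 * A note on the "read_ec - ec > 1" tolerance above: presumably this allows
 * for the case where the on-flash erase counter was already incremented by
 * an erasure while the cached value had not been updated yet, so a
 * difference of exactly one is not reported as a failure.
 */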

/**
 * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
 * is not.
 */
static int self_check_in_wl_tree(const struct ubi_device *ubi,
                                 struct ubi_wl_entry *e, struct rb_root *root)
{
        if (!ubi_dbg_chk_gen(ubi))
                return 0;

        if (in_wl_tree(e, root))
                return 0;

        ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p",
                e->pnum, e->ec, root);
        dump_stack();
        return -EINVAL;
}

/**
 * self_check_in_pq - check if wear-leveling entry is in the protection queue.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 *
 * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
 */
static int self_check_in_pq(const struct ubi_device *ubi,
                            struct ubi_wl_entry *e)
{
        if (!ubi_dbg_chk_gen(ubi))
                return 0;

        if (in_pq(ubi, e))
                return 0;

        ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
                e->pnum, e->ec);
        dump_stack();
        return -EINVAL;
}

#ifndef CONFIG_MTD_UBI_FASTMAP
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
        struct ubi_wl_entry *e;

        e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
        self_check_in_wl_tree(ubi, e, &ubi->free);
        ubi->free_count--;
        ubi_assert(ubi->free_count >= 0);
        rb_erase(&e->u.rb, &ubi->free);

        return e;
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
        int err;

        while (!ubi->free.rb_node && ubi->works_count) {
                spin_unlock(&ubi->wl_lock);

                dbg_wl("do one work synchronously");
                err = do_work(ubi);

                spin_lock(&ubi->wl_lock);
                if (err)
                        return err;
        }

        return 0;
}
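
/*
 * Locking note: produce_free_peb() is entered and left with @ubi->wl_lock
 * held, but the lock is dropped around do_work() because the work functions
 * take it themselves. The loop condition is re-evaluated only after the
 * lock has been re-acquired, which keeps the wait correct.
 */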

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock number in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
        int err;
        struct ubi_wl_entry *e;

retry:
        down_read(&ubi->fm_eba_sem);
        spin_lock(&ubi->wl_lock);
        if (!ubi->free.rb_node) {
                if (ubi->works_count == 0) {
                        ubi_err(ubi, "no free eraseblocks");
                        ubi_assert(list_empty(&ubi->works));
                        spin_unlock(&ubi->wl_lock);
                        return -ENOSPC;
                }

                err = produce_free_peb(ubi);
                if (err < 0) {
                        spin_unlock(&ubi->wl_lock);
                        return err;
                }
                spin_unlock(&ubi->wl_lock);
                up_read(&ubi->fm_eba_sem);
                goto retry;
        }

        e = wl_get_wle(ubi);
        prot_queue_add(ubi, e);
        spin_unlock(&ubi->wl_lock);

        err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
                                    ubi->peb_size - ubi->vid_hdr_aloffset);
        if (err) {
                ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
                return err;
        }

        return e->pnum;
}
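
/*
 * Illustrative usage (hypothetical caller, not part of this file): since
 * ubi_wl_get_peb() returns with @ubi->fm_eba_sem held in read mode, the
 * caller is responsible for releasing it on both the success and the error
 * path:
 *
 *	pnum = ubi_wl_get_peb(ubi);
 *	if (pnum < 0) {
 *		up_read(&ubi->fm_eba_sem);
 *		return pnum;
 *	}
 *	... write to PEB pnum ...
 *	up_read(&ubi->fm_eba_sem);
 */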
#else
#include "fastmap-wl.c"
#endif