Commit Graph

127 Commits (a840b56ba385059742c2b7f4fd665ec9afb8931e)

Dan Carpenter 5578e48e5c ubi: wl: Silence uninitialized variable warning
This condition needs to be flipped around because "err" is uninitialized
when "force" is set.  The Smatch static analysis tool complains, and
UBSan will also complain at runtime.

Fixes: 663586c0a8 ("ubi: Expose the bitrot interface")
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Reviewed-by: Nathan Chancellor <natechancellor@gmail.com>
Tested-by: Nathan Chancellor <natechancellor@gmail.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
2019-03-05 21:21:07 +01:00
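
For illustration, a minimal user-space sketch of the bug class behind the
fix above (all names are hypothetical, not the actual wl.c code). The fix
swaps the operands of the || so that short-circuit evaluation tests
"force" before touching "err":

        #include <stdio.h>

        #define BITFLIPS 1

        static int check_peb(int force)
        {
                int err;        /* assigned only on the !force path */

                if (!force)
                        err = BITFLIPS; /* stand-in for a real read-back check */

                /*
                 * The buggy order, "err == BITFLIPS || force", read "err"
                 * even when "force" was set and "err" was never assigned.
                 * Testing "force" first means "err" is only read on the
                 * path where it was initialized.
                 */
                if (force || err == BITFLIPS)
                        puts("scrubbing PEB");
                return 0;
        }

        int main(void)
        {
                return check_peb(1);
        }
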
Richard Weinberger 663586c0a8 ubi: Expose the bitrot interface
Using UBI_IOCRPEB and UBI_IOCSPEB, userspace can force
reading and scrubbing of PEBs.

In case of bitflips UBI will automatically take action
and move data to a different PEB.
This interface allows a daemon to foster your NAND.

Signed-off-by: Richard Weinberger <richard@nod.at>
2019-02-24 11:40:45 +01:00
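
A hedged sketch of how such a daemon might drive this interface; it
assumes the ioctls take the PEB number through a pointer, as the UAPI
header suggests, and the device path is illustrative:

        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <linux/types.h>
        #include <mtd/ubi-user.h>       /* UBI_IOCRPEB, UBI_IOCSPEB */

        int main(void)
        {
                __s32 pebnum = 0;       /* PEB to check */
                int fd = open("/dev/ubi0", O_RDONLY);

                if (fd < 0) {
                        perror("open /dev/ubi0");
                        return 1;
                }
                /* Ask UBI to read PEB 0 back; on bitflips it scrubs it. */
                if (ioctl(fd, UBI_IOCRPEB, &pebnum) < 0)
                        perror("UBI_IOCRPEB");
                close(fd);
                return 0;
        }
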
Richard Weinberger b32b78f892 ubi: Introduce in_pq()
This function works like in_wl_tree() but checks whether an ubi_wl_entry
is currently in the protection queue.
We need this function to query the current state of an ubi_wl_entry.

Signed-off-by: Richard Weinberger <richard@nod.at>
2019-02-24 11:13:44 +01:00
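
A simplified model of such a predicate, assuming a bucketed protection
queue of singly linked lists; the kernel version walks ubi->pq[] with the
list helpers, but the shape is the same (types here are illustrative):

        #define PROT_QUEUE_LEN 10       /* illustrative bucket count */

        struct wl_entry {
                struct wl_entry *next;
        };

        static struct wl_entry *pq[PROT_QUEUE_LEN];

        /* Nonzero if @e sits in any protection-queue bucket; membership
         * is decided by pointer identity, as in_wl_tree() does. */
        static int in_pq(const struct wl_entry *e)
        {
                for (int i = 0; i < PROT_QUEUE_LEN; i++)
                        for (struct wl_entry *p = pq[i]; p; p = p->next)
                                if (p == e)
                                        return 1;
                return 0;
        }
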
Kees Cook 6396bb2215 treewide: kzalloc() -> kcalloc()
The kzalloc() function has a 2-factor argument form, kcalloc(). This
patch replaces cases of:

        kzalloc(a * b, gfp)

with:
        kcalloc(a, b, gfp)

as well as handling cases of:

        kzalloc(a * b * c, gfp)

with:

        kzalloc(array3_size(a, b, c), gfp)

as it's slightly less ugly than:

        kcalloc(array_size(a, b), c, gfp)

This does, however, attempt to ignore constant size factors like:

        kzalloc(4 * 1024, gfp)

though any constants defined via macros get caught up in the conversion.

Any factors with a sizeof() of "unsigned char", "char", and "u8" were
dropped, since they're redundant.

The Coccinelle script used for this was:

// Fix redundant parens around sizeof().
@@
type TYPE;
expression THING, E;
@@

(
  kzalloc(
-	(sizeof(TYPE)) * E
+	sizeof(TYPE) * E
  , ...)
|
  kzalloc(
-	(sizeof(THING)) * E
+	sizeof(THING) * E
  , ...)
)

// Drop single-byte sizes and redundant parens.
@@
expression COUNT;
typedef u8;
typedef __u8;
@@

(
  kzalloc(
-	sizeof(u8) * (COUNT)
+	COUNT
  , ...)
|
  kzalloc(
-	sizeof(__u8) * (COUNT)
+	COUNT
  , ...)
|
  kzalloc(
-	sizeof(char) * (COUNT)
+	COUNT
  , ...)
|
  kzalloc(
-	sizeof(unsigned char) * (COUNT)
+	COUNT
  , ...)
|
  kzalloc(
-	sizeof(u8) * COUNT
+	COUNT
  , ...)
|
  kzalloc(
-	sizeof(__u8) * COUNT
+	COUNT
  , ...)
|
  kzalloc(
-	sizeof(char) * COUNT
+	COUNT
  , ...)
|
  kzalloc(
-	sizeof(unsigned char) * COUNT
+	COUNT
  , ...)
)

// 2-factor product with sizeof(type/expression) and identifier or constant.
@@
type TYPE;
expression THING;
identifier COUNT_ID;
constant COUNT_CONST;
@@

(
- kzalloc
+ kcalloc
  (
-	sizeof(TYPE) * (COUNT_ID)
+	COUNT_ID, sizeof(TYPE)
  , ...)
|
- kzalloc
+ kcalloc
  (
-	sizeof(TYPE) * COUNT_ID
+	COUNT_ID, sizeof(TYPE)
  , ...)
|
- kzalloc
+ kcalloc
  (
-	sizeof(TYPE) * (COUNT_CONST)
+	COUNT_CONST, sizeof(TYPE)
  , ...)
|
- kzalloc
+ kcalloc
  (
-	sizeof(TYPE) * COUNT_CONST
+	COUNT_CONST, sizeof(TYPE)
  , ...)
|
- kzalloc
+ kcalloc
  (
-	sizeof(THING) * (COUNT_ID)
+	COUNT_ID, sizeof(THING)
  , ...)
|
- kzalloc
+ kcalloc
  (
-	sizeof(THING) * COUNT_ID
+	COUNT_ID, sizeof(THING)
  , ...)
|
- kzalloc
+ kcalloc
  (
-	sizeof(THING) * (COUNT_CONST)
+	COUNT_CONST, sizeof(THING)
  , ...)
|
- kzalloc
+ kcalloc
  (
-	sizeof(THING) * COUNT_CONST
+	COUNT_CONST, sizeof(THING)
  , ...)
)

// 2-factor product, only identifiers.
@@
identifier SIZE, COUNT;
@@

- kzalloc
+ kcalloc
  (
-	SIZE * COUNT
+	COUNT, SIZE
  , ...)

// 3-factor product with 1 sizeof(type) or sizeof(expression), with
// redundant parens removed.
@@
expression THING;
identifier STRIDE, COUNT;
type TYPE;
@@

(
  kzalloc(
-	sizeof(TYPE) * (COUNT) * (STRIDE)
+	array3_size(COUNT, STRIDE, sizeof(TYPE))
  , ...)
|
  kzalloc(
-	sizeof(TYPE) * (COUNT) * STRIDE
+	array3_size(COUNT, STRIDE, sizeof(TYPE))
  , ...)
|
  kzalloc(
-	sizeof(TYPE) * COUNT * (STRIDE)
+	array3_size(COUNT, STRIDE, sizeof(TYPE))
  , ...)
|
  kzalloc(
-	sizeof(TYPE) * COUNT * STRIDE
+	array3_size(COUNT, STRIDE, sizeof(TYPE))
  , ...)
|
  kzalloc(
-	sizeof(THING) * (COUNT) * (STRIDE)
+	array3_size(COUNT, STRIDE, sizeof(THING))
  , ...)
|
  kzalloc(
-	sizeof(THING) * (COUNT) * STRIDE
+	array3_size(COUNT, STRIDE, sizeof(THING))
  , ...)
|
  kzalloc(
-	sizeof(THING) * COUNT * (STRIDE)
+	array3_size(COUNT, STRIDE, sizeof(THING))
  , ...)
|
  kzalloc(
-	sizeof(THING) * COUNT * STRIDE
+	array3_size(COUNT, STRIDE, sizeof(THING))
  , ...)
)

// 3-factor product with 2 sizeof(variable), with redundant parens removed.
@@
expression THING1, THING2;
identifier COUNT;
type TYPE1, TYPE2;
@@

(
  kzalloc(
-	sizeof(TYPE1) * sizeof(TYPE2) * COUNT
+	array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
  , ...)
|
  kzalloc(
-	sizeof(TYPE1) * sizeof(TYPE2) * (COUNT)
+	array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2))
  , ...)
|
  kzalloc(
-	sizeof(THING1) * sizeof(THING2) * COUNT
+	array3_size(COUNT, sizeof(THING1), sizeof(THING2))
  , ...)
|
  kzalloc(
-	sizeof(THING1) * sizeof(THING2) * (COUNT)
+	array3_size(COUNT, sizeof(THING1), sizeof(THING2))
  , ...)
|
  kzalloc(
-	sizeof(TYPE1) * sizeof(THING2) * COUNT
+	array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
  , ...)
|
  kzalloc(
-	sizeof(TYPE1) * sizeof(THING2) * (COUNT)
+	array3_size(COUNT, sizeof(TYPE1), sizeof(THING2))
  , ...)
)

// 3-factor product, only identifiers, with redundant parens removed.
@@
identifier STRIDE, SIZE, COUNT;
@@

(
  kzalloc(
-	(COUNT) * STRIDE * SIZE
+	array3_size(COUNT, STRIDE, SIZE)
  , ...)
|
  kzalloc(
-	COUNT * (STRIDE) * SIZE
+	array3_size(COUNT, STRIDE, SIZE)
  , ...)
|
  kzalloc(
-	COUNT * STRIDE * (SIZE)
+	array3_size(COUNT, STRIDE, SIZE)
  , ...)
|
  kzalloc(
-	(COUNT) * (STRIDE) * SIZE
+	array3_size(COUNT, STRIDE, SIZE)
  , ...)
|
  kzalloc(
-	COUNT * (STRIDE) * (SIZE)
+	array3_size(COUNT, STRIDE, SIZE)
  , ...)
|
  kzalloc(
-	(COUNT) * STRIDE * (SIZE)
+	array3_size(COUNT, STRIDE, SIZE)
  , ...)
|
  kzalloc(
-	(COUNT) * (STRIDE) * (SIZE)
+	array3_size(COUNT, STRIDE, SIZE)
  , ...)
|
  kzalloc(
-	COUNT * STRIDE * SIZE
+	array3_size(COUNT, STRIDE, SIZE)
  , ...)
)

// Any remaining multi-factor products, first at least 3-factor products,
// when they're not all constants...
@@
expression E1, E2, E3;
constant C1, C2, C3;
@@

(
  kzalloc(C1 * C2 * C3, ...)
|
  kzalloc(
-	(E1) * E2 * E3
+	array3_size(E1, E2, E3)
  , ...)
|
  kzalloc(
-	(E1) * (E2) * E3
+	array3_size(E1, E2, E3)
  , ...)
|
  kzalloc(
-	(E1) * (E2) * (E3)
+	array3_size(E1, E2, E3)
  , ...)
|
  kzalloc(
-	E1 * E2 * E3
+	array3_size(E1, E2, E3)
  , ...)
)

// And then all remaining 2 factors products when they're not all constants,
// keeping sizeof() as the second factor argument.
@@
expression THING, E1, E2;
type TYPE;
constant C1, C2, C3;
@@

(
  kzalloc(sizeof(THING) * C2, ...)
|
  kzalloc(sizeof(TYPE) * C2, ...)
|
  kzalloc(C1 * C2 * C3, ...)
|
  kzalloc(C1 * C2, ...)
|
- kzalloc
+ kcalloc
  (
-	sizeof(TYPE) * (E2)
+	E2, sizeof(TYPE)
  , ...)
|
- kzalloc
+ kcalloc
  (
-	sizeof(TYPE) * E2
+	E2, sizeof(TYPE)
  , ...)
|
- kzalloc
+ kcalloc
  (
-	sizeof(THING) * (E2)
+	E2, sizeof(THING)
  , ...)
|
- kzalloc
+ kcalloc
  (
-	sizeof(THING) * E2
+	E2, sizeof(THING)
  , ...)
|
- kzalloc
+ kcalloc
  (
-	(E1) * E2
+	E1, E2
  , ...)
|
- kzalloc
+ kcalloc
  (
-	(E1) * (E2)
+	E1, E2
  , ...)
|
- kzalloc
+ kcalloc
  (
-	E1 * E2
+	E1, E2
  , ...)
)

Signed-off-by: Kees Cook <keescook@chromium.org>
2018-06-12 16:19:22 -07:00
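
The point of the 2-factor form is overflow detection on the
multiplication. A user-space analogue of the difference (calloc()
performs the same check that kcalloc() does in the kernel):

        #include <stdlib.h>

        void *zalloc_2d_before(size_t a, size_t b)
        {
                /* a * b can wrap around SIZE_MAX, so a huge request
                 * silently becomes a tiny allocation. */
                return malloc(a * b);   /* like kzalloc(a * b, gfp) */
        }

        void *zalloc_2d_after(size_t a, size_t b)
        {
                /* calloc(), like kcalloc(), returns NULL on overflow
                 * instead of wrapping, and zeroes the memory. */
                return calloc(a, b);    /* like kcalloc(a, b, gfp) */
        }
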
Richard Weinberger 6e7d801610 ubi: fastmap: Cancel work upon detach
Ben Hutchings pointed out that 29b7a6fa1e ("ubi: fastmap: Don't flush
fastmap work on detach") does not really fix the problem; it just
reduces the risk of hitting the race window where fastmap work races
against free()'ing ubi->volumes[].

The correct approach is to make sure that no more fastmap work is in
progress before we free the ubi data structures, so we cancel fastmap
work right after the ubi background thread is stopped.
By setting ubi->thread_enabled to zero we make sure that no further work
tries to wake the thread.

Fixes: 29b7a6fa1e ("ubi: fastmap: Don't flush fastmap work on detach")
Fixes: 74cdaf2400 ("UBI: Fastmap: Fix memory leaks while closing the WL sub-system")
Cc: stable@vger.kernel.org
Cc: Ben Hutchings <ben.hutchings@codethink.co.uk>
Cc: Martin Townsend <mtownsend1973@gmail.com>

Signed-off-by: Richard Weinberger <richard@nod.at>
2018-06-07 15:53:16 +02:00
Wei Yongjun 7233982ade mtd: ubi: wl: Fix error return code in ubi_wl_init()
Return the error code -ENOMEM from the kmem_cache_alloc() error
handling case instead of 0, as done elsewhere in this function.

Fixes: f78e5623f4 ("ubi: fastmap: Erase outdated anchor PEBs during attach")
Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
Reviewed-by: Boris Brezillon <boris.brezillon@free-electrons.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
2018-01-18 16:45:19 +01:00
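
A hedged user-space analogue of the bug class (hypothetical names): the
cleanup label returns "err", so every failure that jumps there must set
it first:

        #include <errno.h>
        #include <stdlib.h>

        static int init_table(void **slot)
        {
                int err = 0;

                *slot = malloc(64);
                if (!*slot) {
                        err = -ENOMEM;  /* the fix: previously left at 0 */
                        goto out;
                }
                /* ... further setup that may also fail and set err ... */
                return 0;
        out:
                /* unwind any partially initialized state here */
                return err;     /* without the assignment, callers saw 0 */
        }
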
Sascha Hauer 889027bca2 ubi: Fastmap: Fix typo
Fix misspelling of 'available' in function name.

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
Signed-off-by: Richard Weinberger <richard@nod.at>
2018-01-18 00:18:50 +01:00
Sascha Hauer f78e5623f4 ubi: fastmap: Erase outdated anchor PEBs during attach
The fastmap update code might erase the current fastmap anchor PEB
in case it doesn't find any new free PEB. When a power cut happens
in this situation we must not have any outdated fastmap anchor PEB
on the device, because it would be used for attaching during the
next boot.
The easiest way to ensure that is to erase all outdated fastmap
anchor PEBs synchronously during attach.

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
Reviewed-by: Richard Weinberger <richard@nod.at>
Fixes: dbb7d2a88d ("UBI: Add fastmap core")
Cc: <stable@vger.kernel.org>
Signed-off-by: Richard Weinberger <richard@nod.at>
2018-01-17 21:48:05 +01:00
Richard Weinberger 2e8f08deab ubi: Fix races around ubi_refill_pools()
When writing a new Fastmap the first thing that happens
is refilling the pools in memory.
At this stage it is possible that new PEBs from the new pools
already get claimed and written with data.
If this happens before the new Fastmap data structure hits the
flash and we face a power cut, the freshly written PEB will not
be scanned and will go unnoticed.

Solve the issue by locking the pools until the Fastmap is written.

Cc: <stable@vger.kernel.org>
Fixes: dbb7d2a88d ("UBI: Add fastmap core")
Signed-off-by: Richard Weinberger <richard@nod.at>
2016-10-02 22:54:01 +02:00
Richard Weinberger 2365418879 ubi: Deal with interrupted erasures in WL
When Fastmap is used we can face an -EBADMSG here
since Fastmap cannot know about unmaps.
If the erasure was interrupted the PEB may show ECC
errors and UBI would go into ro-mode, as it assumes
that the PEB was checked at attach time, which is
not the case with Fastmap.

Cc: <stable@vger.kernel.org>
Fixes: dbb7d2a88d ("UBI: Add fastmap core")
Signed-off-by: Richard Weinberger <richard@nod.at>
2016-10-02 22:49:54 +02:00
Boris Brezillon 3291b52f9f UBI: introduce the VID buffer concept
Currently, all VID headers are allocated and freed using the
ubi_zalloc_vid_hdr() and ubi_free_vid_hdr() functions. These functions
make sure to align the allocation on ubi->vid_hdr_alsize and adjust the
vid_hdr pointer to match the ubi->vid_hdr_shift requirements.
This works fine, but is a bit convoluted.
Moreover, the future introduction of LEB consolidation (needed to support
MLC/TLC NANDs) will allow a VID buffer to contain more than one VID
header.

Hence the creation of a ubi_vid_io_buf struct to attach extra information
to the VID header.

We currently only store the actual pointer of the underlying buffer, but
will soon add the number of VID headers contained in the buffer.

Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
2016-10-02 22:48:14 +02:00
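
A hedged sketch of the concept, not the exact kernel layout: pair the raw
buffer with the adjusted header pointer so callers stop repeating the
alignment arithmetic, with room for a header count later:

        struct vid_hdr;                 /* on-flash VID header, opaque here */

        struct vid_io_buf {
                void *buffer;           /* underlying, alignment-sized buffer */
                struct vid_hdr *hdr;    /* points into buffer at vid_hdr_shift */
                /* LEB consolidation could later add: int nhdrs; so one
                 * buffer may carry several VID headers */
        };

        static struct vid_hdr *vidbuf_hdr(const struct vid_io_buf *b)
        {
                return b->hdr;          /* callers never touch ->buffer */
        }
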
Richard Weinberger fdf10ed710 ubi: Rework Fastmap attach base code
Introduce a new list to the UBI attach information
object to be able to deal better with old and corrupted
Fastmap eraseblocks.
Also move more Fastmap specific code into fastmap.c.

Signed-off-by: Richard Weinberger <richard@nod.at>
2016-07-29 23:32:42 +02:00
Heiko Schocher 73b0cd57fc UBI: Set free_count to zero before walking through erase list
Set free_count to zero before walking through the ai->erase list
in wl_init().

Found in U-Boot: since U-Boot has no workqueues/threads, it immediately
calls erase_worker(), which increments free_count for each erased
block. Without this patch, free_count is initialized to zero only
afterwards in wl_init(), so in U-Boot the variable always ends up
with the possibly wrong value 0.

Signed-off-by: Heiko Schocher <hs@denx.de>
Reviewed-by: Boris Brezillon <boris.brezillon@free-electrons.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
2016-05-24 15:24:33 +02:00
Sebastian Siewior 34b89df903 mtd: ubi: wl: avoid erasing a PEB which is empty
wear_leveling_worker() currently unconditionally schedules a PEB for
erasure in the error case, even if it has just been taken from the free
list and was never used.
In case the PEB was never used it can be put back on the free list,
saving a precious erase cycle.

v1…v2:
	- to_leb_clean -> dst_leb_clean
	- use the nested option for ensure_wear_leveling()
	- do_sync_erase() can't go -ENOMEM so we can just go into
	  RO-mode now.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Richard Weinberger <richard@nod.at>
2016-01-10 12:33:11 +01:00
Sebastian Siewior 6b238de189 mtd: ubi: don't leak e if schedule_erase() fails
If __erase_worker() fails to erase the EB and schedule_erase() also
fails to do anything about it, then we go RO. But that is no reason to
leak the e argument here. Therefore clean up e.

Cc: <stable@vger.kernel.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Richard Weinberger <richard@nod.at>
2015-12-16 22:59:03 +01:00
Sebastian Siewior 1a31b20cd8 mtd: ubi: fixup error correction in do_sync_erase()
With fastmap we gained do_sync_erase(). This function can return an error
and its error handling isn't obvious. First, the memory allocation for
struct ubi_work can fail, in which case the struct ubi_wl_entry is leaked.
However, if the memory allocation succeeds, the tail function takes
care of the struct ubi_wl_entry. A free here could result in a double
free.
To make the error handling simpler, I split the tail function into one
piece which does the work and another which frees the struct ubi_work
passed as argument. As a result do_sync_erase() can keep the
struct on the stack and we get rid of one error source.

Cc: <stable@vger.kernel.org>
Fixes: 8199b901a ("UBI: Add fastmap support to the WL sub-system")
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Richard Weinberger <richard@nod.at>
2015-12-16 22:52:46 +01:00
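
A hedged user-space sketch of the resulting split (names follow the
commit message): the async wrapper owns and frees the work item, while
the sync path keeps it on the stack and calls the core directly, so
there is nothing to leak or double-free:

        #include <stdlib.h>

        struct erase_work {
                int pebnum;
        };

        static int __erase_worker(struct erase_work *wrk)
        {
                /* ... erase wrk->pebnum, update wear counters ... */
                return 0;
        }

        /* Async path: invoked from the work queue, owns the allocation. */
        static int erase_worker(struct erase_work *wrk)
        {
                int err = __erase_worker(wrk);

                free(wrk);
                return err;
        }

        /* Sync path: no heap allocation, hence no failure to handle. */
        static int do_sync_erase(int pebnum)
        {
                struct erase_work wrk = { .pebnum = pebnum };

                return __erase_worker(&wrk);
        }
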
shengyong 7c7feb2ebf UBI: return ENOSPC if not enough space is available
UBI: attaching mtd1 to ubi0
UBI: scanning is finished
UBI error: init_volumes: not enough PEBs, required 706, available 686
UBI error: ubi_wl_init: no enough physical eraseblocks (-20, need 1)
UBI error: ubi_attach_mtd_dev: failed to attach mtd1, error -12 <= NOT ENOMEM
UBI error: ubi_init: cannot attach mtd1

If there are not enough available PEBs when initializing volumes, return
-ENOSPC directly. If there are not enough available PEBs when initializing
the WL sub-system, return -ENOSPC instead of -ENOMEM.

Cc: stable@vger.kernel.org
Signed-off-by: Sheng Yong <shengyong1@huawei.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
Reviewed-by: David Gstir <david@sigma-star.at>
2015-09-29 12:47:05 +02:00
shengyong 669d3d1233 UBI: Remove unnecessary `\'
Signed-off-by: Sheng Yong <shengyong1@huawei.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
2015-06-03 09:41:45 +02:00
Richard Weinberger 56794c0a1a UBI: Fastmap: Remove is_fm_block()
This function was added to fastmap at a very early stage to provide
paranoid assertions. With the current fastmap implementation this assert
will never trigger, as fastmap PEBs are not seen by the WL sub-system.
Remove it to save us some CPU cycles.

Signed-off-by: Richard Weinberger <richard@nod.at>
2015-03-26 22:47:38 +01:00
Richard Weinberger 2f84c2469e UBI: Fastmap: Introduce may_reserve_for_fm()
...and kill another #ifdef.

Signed-off-by: Richard Weinberger <richard@nod.at>
2015-03-26 22:47:36 +01:00
Richard Weinberger acfda79f88 UBI: Fastmap: Introduce ubi_fastmap_init()
...and kill another #ifdef in wl.c. :-)

Signed-off-by: Richard Weinberger <richard@nod.at>
2015-03-26 22:47:36 +01:00
Richard Weinberger 78d6d497a6 UBI: Move fastmap specific functions out of wl.c
Fastmap is tightly connected to the WL sub-system; many fastmap-specific
functions live in wl.c.
To get rid of most #ifdefs in wl.c, move these functions into a new file
and include it into wl.c.

Signed-off-by: Richard Weinberger <richard@nod.at>
2015-03-26 22:47:28 +01:00
Richard Weinberger ee59ba8b06 UBI: Fix stale pointers in ubi->lookuptbl
In some error paths the WL sub-system gives up on a PEB
and frees its ubi_wl_entry struct but does not set
the entry in ubi->lookuptbl to NULL.
Fastmap can stumble over such a stale pointer, as it uses
ubi->lookuptbl to find all PEBs.

Fix this by introducing a new helper function which free()s
a WL entry and removes the reference from the lookup table.

Signed-off-by: Richard Weinberger <richard@nod.at>
2015-03-26 22:46:04 +01:00
Richard Weinberger 111ab0b26f UBI: Fastmap: Locking updates
a) Rename ubi->fm_sem to ubi->fm_eba_sem as this semaphore
protects EBA changes.
b) Turn ubi->fm_mutex into a rw semaphore. It will still serialize
fastmap writes but also ensures that ubi_wl_put_peb() is not
interrupted by a fastmap write. We use a rw semaphore so that
ubi_wl_put_peb() can still execute in parallel if no fastmap
write is happening.

Signed-off-by: Richard Weinberger <richard@nod.at>
2015-03-26 22:46:02 +01:00
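
A hedged pthread analogue of scheme (b): readers, here the PEB put path,
can run in parallel with each other, while a fastmap write takes the lock
exclusively and therefore cannot interleave with any of them:

        #include <pthread.h>

        static pthread_rwlock_t fm_eba_lock = PTHREAD_RWLOCK_INITIALIZER;

        static void wl_put_peb(void)
        {
                pthread_rwlock_rdlock(&fm_eba_lock);    /* shared: many puts */
                /* ... hand the PEB back to the WL sub-system ... */
                pthread_rwlock_unlock(&fm_eba_lock);
        }

        static void write_fastmap(void)
        {
                pthread_rwlock_wrlock(&fm_eba_lock);    /* exclusive */
                /* ... serialize the fastmap to flash ... */
                pthread_rwlock_unlock(&fm_eba_lock);
        }
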
Richard Weinberger 8fb2a51478 UBI: Fastmap: Fix race after ubi_wl_get_peb()
ubi_wl_get_peb() returns a fresh PEB which can be used by
users of UBI. Due to the pool logic fastmap will correctly
map this PEB upon attach time because it will be scanned.

If a new fastmap is written (due to heavy parallel io)
before the fresh PEB is assigned to the EBA table,
it will not be scanned as it is no longer in the pool.
So, the race window exists between ubi_wl_get_peb()
and the EBA table assignment. We have to make sure
that no new fastmap can be written during that window.

To ensure that, ubi_wl_get_peb() will grab ubi->fm_sem in read mode
and the user of ubi_wl_get_peb() has to release it after the PEB
got assigned.

Signed-off-by: Richard Weinberger <richard@nod.at>
2015-03-26 22:46:01 +01:00
Richard Weinberger 943b33564e UBI: Fastmap: Notify user in case of an ubi_update_fastmap() failure
If ubi_update_fastmap() fails, notify the user.
This is not a hard error, as ubi_update_fastmap() makes sure that upon
failure the current on-flash fastmap will not be used upon the next UBI
attach.

Signed-off-by: Richard Weinberger <richard@nod.at>
2015-03-26 22:45:55 +01:00
Richard Weinberger 74cdaf2400 UBI: Fastmap: Fix memory leaks while closing the WL sub-system
Add a ubi_fastmap_close() to free all resources used by fastmap
at WL shutdown.

Signed-off-by: Richard Weinberger <richard@nod.at>
Tested-by: Guido Martínez <guido@vanguardiasur.com.ar>
Reviewed-by: Guido Martínez <guido@vanguardiasur.com.ar>
2015-03-26 22:45:48 +01:00
Richard Weinberger c4ca6be9d6 UBI: Fastmap: Don't allocate new ubi_wl_entry objects
There is no need to allocate new ones every time, we can reuse
the existing ones.
This makes the code cleaner and more easy to follow.

Signed-off-by: Richard Weinberger <richard@nod.at>
Reviewed-by: Tanya Brokhman <tlinder@codeaurora.org>
Reviewed-by: Guido Martínez <guido@vanguardiasur.com.ar>
2015-03-26 22:17:47 +01:00
Richard Weinberger 6830356466 UBI: Fastmap: Make ubi_refill_pools() fair
Currently ubi_refill_pools() first fills the first pool and then
the second one.
If only very few free PEBs are available, the second pool can end up
with zero PEBs.
Change ubi_refill_pools() to distribute free PEBs fairly between
all pools.

Signed-off-by: Richard Weinberger <richard@nod.at>
Reviewed-by: Guido Martínez <guido@vanguardiasur.com.ar>
2015-03-26 22:17:47 +01:00
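
A hedged sketch of fair refilling: hand out free PEBs round-robin until
every pool is full or the free pool is exhausted, instead of topping up
the first pool before the second sees anything (the two-pool layout and
sizes are illustrative):

        struct pool {
                int pebs[16];
                int size, max_size;
        };

        extern int take_free_peb(void);         /* negative when none left */

        static void refill_pools(struct pool *a, struct pool *b)
        {
                struct pool *pools[] = { a, b };
                int progress = 1;

                while (progress) {
                        progress = 0;
                        for (int i = 0; i < 2; i++) {
                                int peb;

                                if (pools[i]->size >= pools[i]->max_size)
                                        continue;
                                peb = take_free_peb();
                                if (peb < 0)
                                        return; /* free pool exhausted */
                                pools[i]->pebs[pools[i]->size++] = peb;
                                progress = 1;
                        }
                }
        }
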
Richard Weinberger 691a870563 UBI: Split __wl_get_peb()
Make it two functions, wl_get_wle() and wl_get_peb().
wl_get_peb() works exactly like __wl_get_peb() but wl_get_wle()
does not call produce_free_peb().
While refilling the fastmap user pool we cannot release ubi->wl_lock
as produce_free_peb() does.
Hence the fastmap logic now uses wl_get_wle().

Signed-off-by: Richard Weinberger <richard@nod.at>
2015-03-26 22:17:46 +01:00
Richard Weinberger d59f21bebe UBI: Fastmap: Fix races in ubi_wl_get_peb()
ubi_wl_get_peb() has two problems: it reads the pool
size and usage counters without any protection.
While reading one value would be perfectly fine, it reads multiple
values and compares them. This is racy and can lead to incorrect
pool handling.
Furthermore, ubi_update_fastmap() is called without wl_lock held;
before incrementing the used counter it needs to be checked again,
since another thread could have consumed all PEBs from the
pool and pushed the counter beyond ->size.

Signed-off-by: Richard Weinberger <richard@nod.at>
2015-03-26 21:24:24 +01:00
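
A hedged user-space model of the fixed pattern (field names illustrative):
read and compare the counters only under the lock, and test them again
after the fastmap update, because the lock was dropped in between:

        #include <pthread.h>

        struct peb_pool {
                int pebs[16];
                int size, used;
        };

        static pthread_mutex_t wl_lock = PTHREAD_MUTEX_INITIALIZER;

        extern void update_fastmap(void);       /* sleeps; refills the pool */

        static int get_peb(struct peb_pool *pool)
        {
                int peb = -1;

                pthread_mutex_lock(&wl_lock);
                if (pool->used >= pool->size) {
                        pthread_mutex_unlock(&wl_lock);
                        update_fastmap();       /* lock is dropped here */
                        pthread_mutex_lock(&wl_lock);
                }
                /* Check again: another thread may have drained the pool
                 * while the lock was not held. */
                if (pool->used < pool->size)
                        peb = pool->pebs[pool->used++];
                pthread_mutex_unlock(&wl_lock);
                return peb;     /* negative: caller retries or gives up */
        }
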
Richard Weinberger 399a9feeac UBI: Fastmap: Ensure that all fastmap work is done upon WL shutdown
...otherwise the deferred work might run after data structures
have been freed and corrupt memory.

Signed-off-by: Richard Weinberger <richard@nod.at>
Reviewed-by: Guido Martínez <guido@vanguardiasur.com.ar>
2015-03-26 19:31:26 +01:00
Richard Weinberger 19371d73c9 UBI: Fastmap: Ensure that only one fastmap work is scheduled
If the WL pool runs out of PEBs we schedule a fastmap write
to refill it as soon as possible.
Ensure that only one is scheduled at a time; otherwise we might end up
in a fastmap write storm, because writing the fastmap can schedule
another write if bitflips are detected.

Signed-off-by: Richard Weinberger <richard@nod.at>
Reviewed-by: Tanya Brokhman <tlinder@codeaurora.org>
Reviewed-by: Guido Martínez <guido@vanguardiasur.com.ar>
2015-03-26 19:30:54 +01:00
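
A minimal sketch of the idempotent scheduling, with hypothetical names: a
flag guards the enqueue, and the worker clears it once the new fastmap is
on flash, so bitflip-triggered re-writes cannot pile up:

        struct fm_ctx {
                int fm_work_scheduled;  /* protected by the caller's lock */
        };

        extern void enqueue_fastmap_work(struct fm_ctx *c);

        static void schedule_fastmap_write(struct fm_ctx *c)
        {
                if (c->fm_work_scheduled)
                        return;         /* one pending refill is enough */
                c->fm_work_scheduled = 1;
                enqueue_fastmap_work(c);
        }

        /* Called by the worker once the new fastmap hit the flash. */
        static void fastmap_work_done(struct fm_ctx *c)
        {
                c->fm_work_scheduled = 0;
        }
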
Brian Norris f16db8071c UBI: initialize LEB number variable
In some of the 'out_not_moved' error paths, lnum may be used
uninitialized. Don't ignore the warning; let's fix it.

This uninitialized variable doesn't have much visible effect in the end,
since we just schedule the PEB for erasure, and its LEB number doesn't
really matter (it just gets printed in debug messages). But let's get it
straight anyway.

Coverity CID #113449

Cc: stable <stable@vger.kernel.org>
Signed-off-by: Brian Norris <computersforpeace@gmail.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
2015-03-26 12:07:18 +01:00
Tanya Brokhman 45fc5c81d0 UBI: extend UBI layer debug/messaging capabilities - cosmetics
Some cosmetic fixes to the patch "UBI: Extend UBI layer debug/messaging
capabilities".

Signed-off-by: Tanya Brokhman <tlinder@codeaurora.org>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
2015-01-28 16:09:09 +01:00
Richard Weinberger aa5ad3b6eb UBI: Fix double free after do_sync_erase()
If the erase worker is unable to erase a PEB it will
free the ubi_wl_entry itself.
The failing ubi_wl_entry must not free()'d again after
do_sync_erase() returns.

Cc: <stable@vger.kernel.org>
Signed-off-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
2014-11-07 13:42:43 +02:00
Tanya Brokhman 3260870331 UBI: Extend UBI layer debug/messaging capabilities
If there is more then one UBI device mounted, there is no way to
distinguish between messages from different UBI devices.
Add device number to all ubi layer message types.

The R/O block driver messages were replaced by pr_* since
ubi_device structure is not used by it.

Amended a bit by Artem.

Signed-off-by: Tanya Brokhman <tlinder@codeaurora.org>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
2014-11-07 12:08:51 +02:00
Richard Weinberger e3e00445d4 UBI: Fix trivial typo in __schedule_ubi_work
s/of/if/

Signed-off-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
2014-09-30 09:29:37 +03:00
Richard Weinberger 849271a4e4 UBI: wl: Rename cancel flag to shutdown
It confused me more than once that the cancel flag of the
work function does not indicate the cancellation of a single work.
In fact it indicates the WL sub-system shutdown and therefore
worker functions have to free their wl_entries too.
That's why you cannot cancel a single work, you can only shutdown
all works.

Signed-off-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
2014-09-26 13:43:32 +03:00
Richard Weinberger b91671bb23 UBI: Fix livelock in produce_free_peb()
The while loop in produce_free_peb() assumes that each work will produce a
free PEB. This is not true.
If ubi->works_count is 1 and the only scheduled work is
wear_leveling_worker(), produce_free_peb() can loop forever in case
nobody schedules an erase work.
Fix this issue by checking in the while loop whether work is scheduled.

Signed-off-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
2014-09-24 15:36:16 +03:00
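
A hedged sketch of the fixed loop shape (names illustrative): block only
while queued work exists that could still produce a free PEB, and fail
instead of spinning when nothing is pending:

        #include <errno.h>

        extern int free_peb_available(void);
        extern int pending_works(void);
        extern void run_one_work(void);         /* may or may not free a PEB */

        static int produce_free_peb(void)
        {
                while (!free_peb_available()) {
                        if (!pending_works())
                                return -ENOSPC; /* nobody will produce one */
                        run_one_work();
                }
                return 0;
        }
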
Richard Weinberger 49e236bc4a UBI: bugfix in ubi_wl_flush()
Use the _safe variant because we're iterating over a list where items get
deleted and freed.

Signed-off-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
2014-07-28 19:07:33 +03:00
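
The pattern in question, as a hedged kernel-style fragment (field names as
in wl.c at the time; an illustration, not the exact patch). The _safe
variant caches the next node before the body runs, so the body may delete
and free the current one:

        struct ubi_work *wrk, *tmp;

        list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
                list_del(&wrk->list);
                wrk->func(ubi, wrk, 0);         /* the handler may free wrk */
        }

Plain list_for_each_entry would advance by dereferencing the node the
handler just freed.
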
Tanya Brokhman 3d21bb7667 UBI: fix ubi free PEBs count calculation
The ubi->free_count should be updated with every insert/remove to/from
the ubi->free list.

Signed-off-by: Tanya Brokhman <tlinder@codeaurora.org>
Reviewed-by: Dolev Raviv <draviv@codeaurora.org>
Acked-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
2014-05-05 09:31:33 +03:00
Tanya Brokhman 87ed89d21e UBI: fix error path in __wl_get_peb
In case of an error (if there are no free PEBs, for example),
__wl_get_peb will return a negative value. In order to prevent an access
violation we need to test the returned value before using it later on.

Signed-off-by: Tatyana Brokhman <tlinder@codeaurora.org>
Reviewed-by: Dolev Raviv <draviv@codeaurora.org>
Acked-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
2014-05-05 09:31:33 +03:00
Richard Weinberger fb10e4d418 UBI: fix refill_wl_user_pool()
If no free PEBs are available, refill_wl_user_pool() must not
return -ENOSPC immediately.
It has to block until produce_free_peb() has produced a free PEB.

Reported-and-Tested-by: Richard Genoud <richard.genoud@gmail.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
2013-10-03 19:42:46 +03:00
Richard Weinberger 5ef4414f4b UBI: Fix PEB leak in wear_leveling_worker()
get_peb_for_wl() removes the PEB from the free list.
If the WL subsystem detects that no wear leveling is needed
it cancels the operation and drops the gained PEB.
In this case we have to put the PEB back into the free list.

This issue was introduced with commit ed4b7021c
(UBI: remove PEB from free tree in get_peb_for_wl()).

Cc: <stable@vger.kernel.org> # 3.7.x
Signed-off-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
2013-08-19 18:15:11 +03:00
Linus Torvalds 85d5b70d8a Nothing exciting, just clean-ups and nicification. Oh, and one small
optimization which makes UBI use less RAM.

Merge tag 'upstream-3.8-rc1' of git://git.infradead.org/linux-ubi

Pull UBI update from Artem Bityutskiy:
 "Nothing exciting, just clean-ups and nicification.  Oh, and one small
  optimization which makes UBI to use less RAM."

* tag 'upstream-3.8-rc1' of git://git.infradead.org/linux-ubi:
  UBI: embed ubi_debug_info field in ubi_device struct
  UBI: introduce helpers dbg_chk_{io, gen}
  UBI: replace memcpy with struct assignment
  UBI: remove spurious comment
  UBI: gluebi: rename misleading variables
  UBI: do not allocate the memory unnecessarily
  UBI: use list_move_tail instead of list_del/list_add_tail
2012-12-20 07:39:03 -08:00
Ezequiel Garcia 64575574f2 UBI: introduce helpers dbg_chk_{io, gen}
With this patch the code is a bit more readable and there is no
generated-code or functionality impact.
Furthermore, this abstracts implementation details and
will allow changing ubi_debug_info in a less invasive way.

Signed-off-by: Ezequiel Garcia <elezegarcia@gmail.com>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
2012-12-10 13:38:59 +02:00
Richard Weinberger 894aef2157 UBI: don't call ubi_self_check_all_ff() in __wl_get_peb()
As ubi_self_check_all_ff() might sleep we are not allowed
to call it from atomic context.
For now we call it only from ubi_wl_get_peb().
There are some code paths where it would also make sense,
but these paths are currently atomic and only enabled
when fastmap is used.

Signed-off-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
2012-12-04 16:04:31 +02:00
Richard Weinberger ed4b7021cb UBI: remove PEB from free tree in get_peb_for_wl()
If UBI is built without fastmap, get_peb_for_wl() has to
remove the PEB manually from the free tree.
Otherwise the requested PEB lives in two trees.

Reported-by: Zach Sadecki <zsadecki@itwatchdogs.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
2012-12-04 16:04:16 +02:00
Ezequiel Garcia 38f92cca8a UBI: remove spurious comment
This line of comment looks completely bogus.

It was introduced in:
commit d99383b00e
Author: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Date:   Wed May 18 14:47:34 2011 +0300

    UBI: change the interface of a debugging check function

Remove it.

Signed-off-by: Ezequiel Garcia <elezegarcia@gmail.com>
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
2012-11-30 13:54:41 +02:00