From 6bc335828056f3b301a3deadda782de4e8f0db08 Mon Sep 17 00:00:00 2001 From: "Joel Fernandes (Google)" Date: Tue, 3 Nov 2020 09:25:57 -0500 Subject: [PATCH 001/130] rcu/tree: Make rcu_do_batch count how many callbacks were executed The rcu_do_batch() function extracts the ready-to-invoke callbacks from the rcu_segcblist located in the ->cblist field of the current CPU's rcu_data structure. These callbacks are first moved to a local (unsegmented) rcu_cblist. The rcu_do_batch() function then uses this rcu_cblist's ->len field to count how many CBs it has invoked, but it does so by counting that field down from zero. Finally, this function negates the value in this ->len field (resulting in a positive number) and subtracts the result from the ->len field of the current CPU's ->cblist field. Except that it is sometimes necessary for rcu_do_batch() to stop invoking callbacks mid-stream, despite there being more ready to invoke, for example, if a high-priority task wakes up. In this case the remaining not-yet-invoked callbacks are requeued back onto the CPU's ->cblist, but remain in the ready-to-invoke segment of that list. As above, the negative of the local rcu_cblist's ->len field is still subtracted from the ->len field of the current CPU's ->cblist field. The design of counting down from 0 is confusing and error-prone, plus use of a positive count will make it easier to provide a uniform and consistent API to deal with the per-segment counts that are added later in this series. For example, rcu_segcblist_extract_done_cbs() can unconditionally populate the resulting unsegmented list's ->len field during extraction. This commit therefore explicitly counts how many callbacks were executed in rcu_do_batch() itself, counting up from zero, and then uses that to update the per-CPU segcb list's ->len field, without relying on the downcounting of rcl->len from zero. Reviewed-by: Frederic Weisbecker Reviewed-by: Neeraj Upadhyay Signed-off-by: Joel Fernandes (Google) Signed-off-by: Paul E. McKenney --- kernel/rcu/rcu_segcblist.c | 2 +- kernel/rcu/rcu_segcblist.h | 1 + kernel/rcu/tree.c | 11 +++++------ 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/kernel/rcu/rcu_segcblist.c b/kernel/rcu/rcu_segcblist.c index 2d2a6b6b9dfb..bb246d8c6ef1 100644 --- a/kernel/rcu/rcu_segcblist.c +++ b/kernel/rcu/rcu_segcblist.c @@ -95,7 +95,7 @@ static void rcu_segcblist_set_len(struct rcu_segcblist *rsclp, long v) * This increase is fully ordered with respect to the callers accesses * both before and after. */ -static void rcu_segcblist_add_len(struct rcu_segcblist *rsclp, long v) +void rcu_segcblist_add_len(struct rcu_segcblist *rsclp, long v) { #ifdef CONFIG_RCU_NOCB_CPU smp_mb__before_atomic(); /* Up to the caller! 
*/ diff --git a/kernel/rcu/rcu_segcblist.h b/kernel/rcu/rcu_segcblist.h index 492262bcb591..1d2d61406463 100644 --- a/kernel/rcu/rcu_segcblist.h +++ b/kernel/rcu/rcu_segcblist.h @@ -76,6 +76,7 @@ static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg) } void rcu_segcblist_inc_len(struct rcu_segcblist *rsclp); +void rcu_segcblist_add_len(struct rcu_segcblist *rsclp, long v); void rcu_segcblist_init(struct rcu_segcblist *rsclp); void rcu_segcblist_disable(struct rcu_segcblist *rsclp); void rcu_segcblist_offload(struct rcu_segcblist *rsclp); diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 40e5e3dd253e..cc6f379c7ddf 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2434,7 +2434,7 @@ static void rcu_do_batch(struct rcu_data *rdp) const bool offloaded = rcu_segcblist_is_offloaded(&rdp->cblist); struct rcu_head *rhp; struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl); - long bl, count; + long bl, count = 0; long pending, tlimit = 0; /* If no callbacks are ready, just return. */ @@ -2479,6 +2479,7 @@ static void rcu_do_batch(struct rcu_data *rdp) for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) { rcu_callback_t f; + count++; debug_rcu_head_unqueue(rhp); rcu_lock_acquire(&rcu_callback_map); @@ -2492,15 +2493,14 @@ static void rcu_do_batch(struct rcu_data *rdp) /* * Stop only if limit reached and CPU has something to do. - * Note: The rcl structure counts down from zero. */ - if (-rcl.len >= bl && !offloaded && + if (count >= bl && !offloaded && (need_resched() || (!is_idle_task(current) && !rcu_is_callbacks_kthread()))) break; if (unlikely(tlimit)) { /* only call local_clock() every 32 callbacks */ - if (likely((-rcl.len & 31) || local_clock() < tlimit)) + if (likely((count & 31) || local_clock() < tlimit)) continue; /* Exceeded the time limit, so leave. */ break; @@ -2517,7 +2517,6 @@ static void rcu_do_batch(struct rcu_data *rdp) local_irq_save(flags); rcu_nocb_lock(rdp); - count = -rcl.len; rdp->n_cbs_invoked += count; trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(), is_idle_task(current), rcu_is_callbacks_kthread()); @@ -2525,7 +2524,7 @@ static void rcu_do_batch(struct rcu_data *rdp) /* Update counts and requeue any remaining callbacks. */ rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl); smp_mb(); /* List handling before counting for rcu_barrier(). */ - rcu_segcblist_insert_count(&rdp->cblist, &rcl); + rcu_segcblist_add_len(&rdp->cblist, -count); /* Reinstate batch limit if we have worked down the excess. */ count = rcu_segcblist_n_cbs(&rdp->cblist); From be06c2577eca6d9dbf61985d4078eb904024380f Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 9 Nov 2020 08:22:16 -0800 Subject: [PATCH 002/130] docs: Remove redundant "``" from Requirements.rst The docbook system has learned that "()" designates a function, so this commit removes the no-longer-needed "``" to improve readability of the raw .rst file. Reported-by: Peter Zijlstra Cc: Mauro Carvalho Chehab Cc: Jonathan Corbet [ paulmck: Apply Stephen Rothwell feedback. ] Signed-off-by: Paul E. McKenney --- .../RCU/Design/Requirements/Requirements.rst | 664 +++++++++--------- 1 file changed, 332 insertions(+), 332 deletions(-) diff --git a/Documentation/RCU/Design/Requirements/Requirements.rst b/Documentation/RCU/Design/Requirements/Requirements.rst index e8c84fcc0507..9b23be637e17 100644 --- a/Documentation/RCU/Design/Requirements/Requirements.rst +++ b/Documentation/RCU/Design/Requirements/Requirements.rst @@ -72,11 +72,11 @@ understanding of this guarantee. 
RCU's grace-period guarantee allows updaters to wait for the completion of all pre-existing RCU read-side critical sections. An RCU read-side -critical section begins with the marker ``rcu_read_lock()`` and ends -with the marker ``rcu_read_unlock()``. These markers may be nested, and +critical section begins with the marker rcu_read_lock() and ends +with the marker rcu_read_unlock(). These markers may be nested, and RCU treats a nested set as one big RCU read-side critical section. -Production-quality implementations of ``rcu_read_lock()`` and -``rcu_read_unlock()`` are extremely lightweight, and in fact have +Production-quality implementations of rcu_read_lock() and +rcu_read_unlock() are extremely lightweight, and in fact have exactly zero overhead in Linux kernels built for production use with ``CONFIG_PREEMPT=n``. @@ -102,12 +102,12 @@ overhead to readers, for example: 15 WRITE_ONCE(y, 1); 16 } -Because the ``synchronize_rcu()`` on line 14 waits for all pre-existing -readers, any instance of ``thread0()`` that loads a value of zero from -``x`` must complete before ``thread1()`` stores to ``y``, so that +Because the synchronize_rcu() on line 14 waits for all pre-existing +readers, any instance of thread0() that loads a value of zero from +``x`` must complete before thread1() stores to ``y``, so that instance must also load a value of zero from ``y``. Similarly, any -instance of ``thread0()`` that loads a value of one from ``y`` must have -started after the ``synchronize_rcu()`` started, and must therefore also +instance of thread0() that loads a value of one from ``y`` must have +started after the synchronize_rcu() started, and must therefore also load a value of one from ``x``. Therefore, the outcome: :: @@ -121,14 +121,14 @@ cannot happen. +-----------------------------------------------------------------------+ | Wait a minute! You said that updaters can make useful forward | | progress concurrently with readers, but pre-existing readers will | -| block ``synchronize_rcu()``!!! | +| block synchronize_rcu()!!! | | Just who are you trying to fool??? | +-----------------------------------------------------------------------+ | **Answer**: | +-----------------------------------------------------------------------+ | First, if updaters do not wish to be blocked by readers, they can use | -| ``call_rcu()`` or ``kfree_rcu()``, which will be discussed later. | -| Second, even when using ``synchronize_rcu()``, the other update-side | +| call_rcu() or kfree_rcu(), which will be discussed later. | +| Second, even when using synchronize_rcu(), the other update-side | | code does run concurrently with readers, whether pre-existing or not. | +-----------------------------------------------------------------------+ @@ -170,34 +170,34 @@ recovery from node failure, more or less as follows: 29 WRITE_ONCE(state, STATE_NORMAL); 30 } -The RCU read-side critical section in ``do_something_dlm()`` works with -the ``synchronize_rcu()`` in ``start_recovery()`` to guarantee that -``do_something()`` never runs concurrently with ``recovery()``, but with -little or no synchronization overhead in ``do_something_dlm()``. +The RCU read-side critical section in do_something_dlm() works with +the synchronize_rcu() in start_recovery() to guarantee that +do_something() never runs concurrently with recovery(), but with +little or no synchronization overhead in do_something_dlm(). 
+-----------------------------------------------------------------------+ | **Quick Quiz**: | +-----------------------------------------------------------------------+ -| Why is the ``synchronize_rcu()`` on line 28 needed? | +| Why is the synchronize_rcu() on line 28 needed? | +-----------------------------------------------------------------------+ | **Answer**: | +-----------------------------------------------------------------------+ | Without that extra grace period, memory reordering could result in | -| ``do_something_dlm()`` executing ``do_something()`` concurrently with | -| the last bits of ``recovery()``. | +| do_something_dlm() executing do_something() concurrently with | +| the last bits of recovery(). | +-----------------------------------------------------------------------+ In order to avoid fatal problems such as deadlocks, an RCU read-side -critical section must not contain calls to ``synchronize_rcu()``. +critical section must not contain calls to synchronize_rcu(). Similarly, an RCU read-side critical section must not contain anything that waits, directly or indirectly, on completion of an invocation of -``synchronize_rcu()``. +synchronize_rcu(). Although RCU's grace-period guarantee is useful in and of itself, with `quite a few use cases `__, it would be good to be able to use RCU to coordinate read-side access to linked data structures. For this, the grace-period guarantee is not sufficient, -as can be seen in function ``add_gp_buggy()`` below. We will look at the +as can be seen in function add_gp_buggy() below. We will look at the reader's code later, but in the meantime, just think of the reader as locklessly picking up the ``gp`` pointer, and, if the value loaded is non-\ ``NULL``, locklessly accessing the ``->a`` and ``->b`` fields. @@ -256,8 +256,8 @@ Publish/Subscribe Guarantee RCU's publish-subscribe guarantee allows data to be inserted into a linked data structure without disrupting RCU readers. The updater uses -``rcu_assign_pointer()`` to insert the new data, and readers use -``rcu_dereference()`` to access data, whether new or old. The following +rcu_assign_pointer() to insert the new data, and readers use +rcu_dereference() to access data, whether new or old. The following shows an example of insertion: :: @@ -279,7 +279,7 @@ shows an example of insertion: 15 return true; 16 } -The ``rcu_assign_pointer()`` on line 13 is conceptually equivalent to a +The rcu_assign_pointer() on line 13 is conceptually equivalent to a simple assignment statement, but also guarantees that its assignment will happen after the two assignments in lines 11 and 12, similar to the C11 ``memory_order_release`` store operation. It also prevents any @@ -289,7 +289,7 @@ number of “interesting” compiler optimizations, for example, the use of +-----------------------------------------------------------------------+ | **Quick Quiz**: | +-----------------------------------------------------------------------+ -| But ``rcu_assign_pointer()`` does nothing to prevent the two | +| But rcu_assign_pointer() does nothing to prevent the two | | assignments to ``p->a`` and ``p->b`` from being reordered. Can't that | | also cause problems? 
| +-----------------------------------------------------------------------+ @@ -303,7 +303,7 @@ number of “interesting” compiler optimizations, for example, the use of It is tempting to assume that the reader need not do anything special to control its accesses to the RCU-protected data, as shown in -``do_something_gp_buggy()`` below: +do_something_gp_buggy() below: :: @@ -345,7 +345,7 @@ If this function ran concurrently with a series of updates that replaced the current structure with a new one, the fetches of ``gp->a`` and ``gp->b`` might well come from two different structures, which could cause serious confusion. To prevent this (and much else besides), -``do_something_gp()`` uses ``rcu_dereference()`` to fetch from ``gp``: +do_something_gp() uses rcu_dereference() to fetch from ``gp``: :: @@ -362,21 +362,21 @@ cause serious confusion. To prevent this (and much else besides), 11 return false; 12 } -The ``rcu_dereference()`` uses volatile casts and (for DEC Alpha) memory +The rcu_dereference() uses volatile casts and (for DEC Alpha) memory barriers in the Linux kernel. Should a `high-quality implementation of C11 ``memory_order_consume`` [PDF] `__ -ever appear, then ``rcu_dereference()`` could be implemented as a +ever appear, then rcu_dereference() could be implemented as a ``memory_order_consume`` load. Regardless of the exact implementation, a -pointer fetched by ``rcu_dereference()`` may not be used outside of the +pointer fetched by rcu_dereference() may not be used outside of the outermost RCU read-side critical section containing that -``rcu_dereference()``, unless protection of the corresponding data +rcu_dereference(), unless protection of the corresponding data element has been passed from RCU to some other synchronization mechanism, most commonly locking or `reference counting `__. -In short, updaters use ``rcu_assign_pointer()`` and readers use -``rcu_dereference()``, and these two RCU API elements work together to +In short, updaters use rcu_assign_pointer() and readers use +rcu_dereference(), and these two RCU API elements work together to ensure that readers have a consistent view of newly added data elements. Of course, it is also necessary to remove elements from RCU-protected @@ -388,9 +388,9 @@ data structures, for example, using the following process: the newly removed data element). #. At this point, only the updater has a reference to the newly removed data element, so it can safely reclaim the data element, for example, - by passing it to ``kfree()``. + by passing it to kfree(). -This process is implemented by ``remove_gp_synchronous()``: +This process is implemented by remove_gp_synchronous(): :: @@ -413,16 +413,16 @@ This process is implemented by ``remove_gp_synchronous()``: This function is straightforward, with line 13 waiting for a grace period before line 14 frees the old data element. This waiting ensures -that readers will reach line 7 of ``do_something_gp()`` before the data -element referenced by ``p`` is freed. The ``rcu_access_pointer()`` on -line 6 is similar to ``rcu_dereference()``, except that: +that readers will reach line 7 of do_something_gp() before the data +element referenced by ``p`` is freed. The rcu_access_pointer() on +line 6 is similar to rcu_dereference(), except that: -#. The value returned by ``rcu_access_pointer()`` cannot be +#. The value returned by rcu_access_pointer() cannot be dereferenced. 
If you want to access the value pointed to as well as - the pointer itself, use ``rcu_dereference()`` instead of - ``rcu_access_pointer()``. -#. The call to ``rcu_access_pointer()`` need not be protected. In - contrast, ``rcu_dereference()`` must either be within an RCU + the pointer itself, use rcu_dereference() instead of + rcu_access_pointer(). +#. The call to rcu_access_pointer() need not be protected. In + contrast, rcu_dereference() must either be within an RCU read-side critical section or in a code segment where the pointer cannot change, for example, in code protected by the corresponding update-side lock. @@ -430,13 +430,13 @@ line 6 is similar to ``rcu_dereference()``, except that: +-----------------------------------------------------------------------+ | **Quick Quiz**: | +-----------------------------------------------------------------------+ -| Without the ``rcu_dereference()`` or the ``rcu_access_pointer()``, | +| Without the rcu_dereference() or the rcu_access_pointer(), | | what destructive optimizations might the compiler make use of? | +-----------------------------------------------------------------------+ | **Answer**: | +-----------------------------------------------------------------------+ -| Let's start with what happens to ``do_something_gp()`` if it fails to | -| use ``rcu_dereference()``. It could reuse a value formerly fetched | +| Let's start with what happens to do_something_gp() if it fails to | +| use rcu_dereference(). It could reuse a value formerly fetched | | from this same pointer. It could also fetch the pointer from ``gp`` | | in a byte-at-a-time manner, resulting in *load tearing*, in turn | | resulting a bytewise mash-up of two distinct pointer values. It might | @@ -445,15 +445,15 @@ line 6 is similar to ``rcu_dereference()``, except that: | update has changed the pointer to match the wrong guess. Too bad | | about any dereferences that returned pre-initialization garbage in | | the meantime! | -| For ``remove_gp_synchronous()``, as long as all modifications to | +| For remove_gp_synchronous(), as long as all modifications to | | ``gp`` are carried out while holding ``gp_lock``, the above | | optimizations are harmless. However, ``sparse`` will complain if you | | define ``gp`` with ``__rcu`` and then access it without using either | -| ``rcu_access_pointer()`` or ``rcu_dereference()``. | +| rcu_access_pointer() or rcu_dereference(). | +-----------------------------------------------------------------------+ In short, RCU's publish-subscribe guarantee is provided by the -combination of ``rcu_assign_pointer()`` and ``rcu_dereference()``. This +combination of rcu_assign_pointer() and rcu_dereference(). This guarantee allows data elements to be safely added to RCU-protected linked data structures without disrupting RCU readers. This guarantee can be used in combination with the grace-period guarantee to also allow @@ -462,9 +462,9 @@ again without disrupting RCU readers. This guarantee was only partially premeditated. DYNIX/ptx used an explicit memory barrier for publication, but had nothing resembling -``rcu_dereference()`` for subscription, nor did it have anything +rcu_dereference() for subscription, nor did it have anything resembling the dependency-ordering barrier that was later subsumed -into ``rcu_dereference()`` and later still into ``READ_ONCE()``. The +into rcu_dereference() and later still into READ_ONCE(). 
The need for these operations made itself known quite suddenly at a late-1990s meeting with the DEC Alpha architects, back in the days when DEC was still a free-standing company. It took the Alpha architects a @@ -474,7 +474,7 @@ documentation did not make this point clear. More recent work with the C and C++ standards committees have provided much education on tricks and traps from the compiler. In short, compilers were much less tricky in the early 1990s, but in 2015, don't even think about omitting -``rcu_dereference()``! +rcu_dereference()! Memory-Barrier Guarantees ~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -484,31 +484,31 @@ demonstrates the need for RCU's stringent memory-ordering guarantees on systems with more than one CPU: #. Each CPU that has an RCU read-side critical section that begins - before ``synchronize_rcu()`` starts is guaranteed to execute a full + before synchronize_rcu() starts is guaranteed to execute a full memory barrier between the time that the RCU read-side critical - section ends and the time that ``synchronize_rcu()`` returns. Without + section ends and the time that synchronize_rcu() returns. Without this guarantee, a pre-existing RCU read-side critical section might hold a reference to the newly removed ``struct foo`` after the - ``kfree()`` on line 14 of ``remove_gp_synchronous()``. + kfree() on line 14 of remove_gp_synchronous(). #. Each CPU that has an RCU read-side critical section that ends after - ``synchronize_rcu()`` returns is guaranteed to execute a full memory - barrier between the time that ``synchronize_rcu()`` begins and the + synchronize_rcu() returns is guaranteed to execute a full memory + barrier between the time that synchronize_rcu() begins and the time that the RCU read-side critical section begins. Without this guarantee, a later RCU read-side critical section running after the - ``kfree()`` on line 14 of ``remove_gp_synchronous()`` might later run - ``do_something_gp()`` and find the newly deleted ``struct foo``. -#. If the task invoking ``synchronize_rcu()`` remains on a given CPU, + kfree() on line 14 of remove_gp_synchronous() might later run + do_something_gp() and find the newly deleted ``struct foo``. +#. If the task invoking synchronize_rcu() remains on a given CPU, then that CPU is guaranteed to execute a full memory barrier sometime - during the execution of ``synchronize_rcu()``. This guarantee ensures - that the ``kfree()`` on line 14 of ``remove_gp_synchronous()`` really + during the execution of synchronize_rcu(). This guarantee ensures + that the kfree() on line 14 of remove_gp_synchronous() really does execute after the removal on line 11. -#. If the task invoking ``synchronize_rcu()`` migrates among a group of +#. If the task invoking synchronize_rcu() migrates among a group of CPUs during that invocation, then each of the CPUs in that group is guaranteed to execute a full memory barrier sometime during the - execution of ``synchronize_rcu()``. This guarantee also ensures that - the ``kfree()`` on line 14 of ``remove_gp_synchronous()`` really does + execution of synchronize_rcu(). This guarantee also ensures that + the kfree() on line 14 of remove_gp_synchronous() really does execute after the removal on line 11, but also in the case where the - thread executing the ``synchronize_rcu()`` migrates in the meantime. + thread executing the synchronize_rcu() migrates in the meantime. 
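
For concreteness, the following minimal sketch pairs a reader with an
updater and marks where these guarantees act. The reader() and
updater() names are hypothetical (they are not part of the examples in
this document); the sketch reuses the ``gp`` pointer, ``gp_lock``, and
``struct foo`` from remove_gp_synchronous(), and assumes the usual
<linux/rcupdate.h>, <linux/spinlock.h>, and <linux/slab.h> headers:

::

   void reader(void)
   {
           struct foo *p;

           rcu_read_lock();
           p = rcu_dereference(gp);
           if (p)
                   do_something_with(p->a);
           /*
            * First guarantee: if this critical section began before a given
            * synchronize_rcu() started, the access above is ordered before
            * that synchronize_rcu() returns, and thus before the kfree()
            * below.
            */
           rcu_read_unlock();
   }

   void updater(void)
   {
           struct foo *p;

           spin_lock(&gp_lock);
           p = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
           rcu_assign_pointer(gp, NULL);           /* Removal. */
           spin_unlock(&gp_lock);
           /*
            * Third and fourth guarantees: a full memory barrier somewhere
            * within synchronize_rcu() orders the removal above before the
            * kfree() below, wherever this task happens to be running.
            */
           synchronize_rcu();
           kfree(p);
   }
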
+-----------------------------------------------------------------------+ | **Quick Quiz**: | @@ -516,19 +516,19 @@ systems with more than one CPU: | Given that multiple CPUs can start RCU read-side critical sections at | | any time without any ordering whatsoever, how can RCU possibly tell | | whether or not a given RCU read-side critical section starts before a | -| given instance of ``synchronize_rcu()``? | +| given instance of synchronize_rcu()? | +-----------------------------------------------------------------------+ | **Answer**: | +-----------------------------------------------------------------------+ | If RCU cannot tell whether or not a given RCU read-side critical | -| section starts before a given instance of ``synchronize_rcu()``, then | +| section starts before a given instance of synchronize_rcu(), then | | it must assume that the RCU read-side critical section started first. | -| In other words, a given instance of ``synchronize_rcu()`` can avoid | +| In other words, a given instance of synchronize_rcu() can avoid | | waiting on a given RCU read-side critical section only if it can | -| prove that ``synchronize_rcu()`` started first. | -| A related question is “When ``rcu_read_lock()`` doesn't generate any | +| prove that synchronize_rcu() started first. | +| A related question is “When rcu_read_lock() doesn't generate any | | code, why does it matter how it relates to a grace period?” The | -| answer is that it is not the relationship of ``rcu_read_lock()`` | +| answer is that it is not the relationship of rcu_read_lock() | | itself that is important, but rather the relationship of the code | | within the enclosed RCU read-side critical section to the code | | preceding and following the grace period. If we take this viewpoint, | @@ -556,14 +556,14 @@ systems with more than one CPU: | Yes, they really are required. To see why the first guarantee is | | required, consider the following sequence of events: | | | -| #. CPU 1: ``rcu_read_lock()`` | +| #. CPU 1: rcu_read_lock() | | #. CPU 1: ``q = rcu_dereference(gp); /* Very likely to return p. */`` | | #. CPU 0: ``list_del_rcu(p);`` | -| #. CPU 0: ``synchronize_rcu()`` starts. | +| #. CPU 0: synchronize_rcu() starts. | | #. CPU 1: ``do_something_with(q->a);`` | | ``/* No smp_mb(), so might happen after kfree(). */`` | -| #. CPU 1: ``rcu_read_unlock()`` | -| #. CPU 0: ``synchronize_rcu()`` returns. | +| #. CPU 1: rcu_read_unlock() | +| #. CPU 0: synchronize_rcu() returns. | | #. CPU 0: ``kfree(p);`` | | | | Therefore, there absolutely must be a full memory barrier between the | @@ -574,14 +574,14 @@ systems with more than one CPU: | is roughly similar: | | | | #. CPU 0: ``list_del_rcu(p);`` | -| #. CPU 0: ``synchronize_rcu()`` starts. | -| #. CPU 1: ``rcu_read_lock()`` | +| #. CPU 0: synchronize_rcu() starts. | +| #. CPU 1: rcu_read_lock() | | #. CPU 1: ``q = rcu_dereference(gp);`` | | ``/* Might return p if no memory barrier. */`` | -| #. CPU 0: ``synchronize_rcu()`` returns. | +| #. CPU 0: synchronize_rcu() returns. | | #. CPU 0: ``kfree(p);`` | | #. CPU 1: ``do_something_with(q->a); /* Boom!!! */`` | -| #. CPU 1: ``rcu_read_unlock()`` | +| #. 
CPU 1: rcu_read_unlock() | | | | And similarly, without a memory barrier between the beginning of the | | grace period and the beginning of the RCU read-side critical section, | @@ -597,7 +597,7 @@ systems with more than one CPU: +-----------------------------------------------------------------------+ | **Quick Quiz**: | +-----------------------------------------------------------------------+ -| You claim that ``rcu_read_lock()`` and ``rcu_read_unlock()`` generate | +| You claim that rcu_read_lock() and rcu_read_unlock() generate | | absolutely no code in some kernel builds. This means that the | | compiler might arbitrarily rearrange consecutive RCU read-side | | critical sections. Given such rearrangement, if a given RCU read-side | @@ -607,11 +607,11 @@ systems with more than one CPU: +-----------------------------------------------------------------------+ | **Answer**: | +-----------------------------------------------------------------------+ -| In cases where ``rcu_read_lock()`` and ``rcu_read_unlock()`` generate | +| In cases where rcu_read_lock() and rcu_read_unlock() generate | | absolutely no code, RCU infers quiescent states only at special | | locations, for example, within the scheduler. Because calls to | -| ``schedule()`` had better prevent calling-code accesses to shared | -| variables from being rearranged across the call to ``schedule()``, if | +| schedule() had better prevent calling-code accesses to shared | +| variables from being rearranged across the call to schedule(), if | | RCU detects the end of a given RCU read-side critical section, it | | will necessarily detect the end of all prior RCU read-side critical | | sections, no matter how aggressively the compiler scrambles the code. | @@ -655,8 +655,8 @@ read-side critical section might search for a given data element, and then might acquire the update-side spinlock in order to update that element, all while remaining in that RCU read-side critical section. Of course, it is necessary to exit the RCU read-side critical section -before invoking ``synchronize_rcu()``, however, this inconvenience can -be avoided through use of the ``call_rcu()`` and ``kfree_rcu()`` API +before invoking synchronize_rcu(), however, this inconvenience can +be avoided through use of the call_rcu() and kfree_rcu() API members described later in this document. +-----------------------------------------------------------------------+ @@ -694,10 +694,10 @@ these non-guarantees were premeditated. Readers Impose Minimal Ordering ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Reader-side markers such as ``rcu_read_lock()`` and -``rcu_read_unlock()`` provide absolutely no ordering guarantees except +Reader-side markers such as rcu_read_lock() and +rcu_read_unlock() provide absolutely no ordering guarantees except through their interaction with the grace-period APIs such as -``synchronize_rcu()``. To see this, consider the following pair of +synchronize_rcu(). To see this, consider the following pair of threads: :: @@ -722,7 +722,7 @@ threads: 18 rcu_read_unlock(); 19 } -After ``thread0()`` and ``thread1()`` execute concurrently, it is quite +After thread0() and thread1() execute concurrently, it is quite possible to have :: @@ -730,7 +730,7 @@ possible to have (r1 == 1 && r2 == 0) (that is, ``y`` appears to have been assigned before ``x``), which would -not be possible if ``rcu_read_lock()`` and ``rcu_read_unlock()`` had +not be possible if rcu_read_lock() and rcu_read_unlock() had much in the way of ordering properties. 
But they do not, so the CPU is within its rights to do significant reordering. This is by design: Any significant ordering constraints would slow down these fast-path APIs. @@ -742,14 +742,14 @@ significant ordering constraints would slow down these fast-path APIs. +-----------------------------------------------------------------------+ | **Answer**: | +-----------------------------------------------------------------------+ -| No, the volatile casts in ``READ_ONCE()`` and ``WRITE_ONCE()`` | +| No, the volatile casts in READ_ONCE() and WRITE_ONCE() | | prevent the compiler from reordering in this particular case. | +-----------------------------------------------------------------------+ Readers Do Not Exclude Updaters ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Neither ``rcu_read_lock()`` nor ``rcu_read_unlock()`` exclude updates. +Neither rcu_read_lock() nor rcu_read_unlock() exclude updates. All they do is to prevent grace periods from ending. The following example illustrates this: @@ -775,19 +775,19 @@ example illustrates this: 18 spin_unlock(&my_lock); 19 } -If the ``thread0()`` function's ``rcu_read_lock()`` excluded the -``thread1()`` function's update, the ``WARN_ON()`` could never fire. But -the fact is that ``rcu_read_lock()`` does not exclude much of anything -aside from subsequent grace periods, of which ``thread1()`` has none, so -the ``WARN_ON()`` can and does fire. +If the thread0() function's rcu_read_lock() excluded the +thread1() function's update, the WARN_ON() could never fire. But +the fact is that rcu_read_lock() does not exclude much of anything +aside from subsequent grace periods, of which thread1() has none, so +the WARN_ON() can and does fire. Updaters Only Wait For Old Readers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -It might be tempting to assume that after ``synchronize_rcu()`` +It might be tempting to assume that after synchronize_rcu() completes, there are no readers executing. This temptation must be avoided because new readers can start immediately after -``synchronize_rcu()`` starts, and ``synchronize_rcu()`` is under no +synchronize_rcu() starts, and synchronize_rcu() is under no obligation to wait for these new readers. +-----------------------------------------------------------------------+ @@ -799,10 +799,10 @@ obligation to wait for these new readers. +-----------------------------------------------------------------------+ | **Answer**: | +-----------------------------------------------------------------------+ -| For no time at all. Even if ``synchronize_rcu()`` were to wait until | +| For no time at all. Even if synchronize_rcu() were to wait until | | all readers had completed, a new reader might start immediately after | -| ``synchronize_rcu()`` completed. Therefore, the code following | -| ``synchronize_rcu()`` can *never* rely on there being no readers. | +| synchronize_rcu() completed. Therefore, the code following | +| synchronize_rcu() can *never* rely on there being no readers. | +-----------------------------------------------------------------------+ Grace Periods Don't Partition Read-Side Critical Sections @@ -892,12 +892,12 @@ period is known to end before the second grace period starts: 28 rcu_read_unlock(); 29 } -Here, if ``(r1 == 1)``, then ``thread0()``'s write to ``b`` must happen -before the end of ``thread1()``'s grace period. If in addition -``(r4 == 1)``, then ``thread3()``'s read from ``b`` must happen after -the beginning of ``thread2()``'s grace period. 
If it is also the case -that ``(r2 == 1)``, then the end of ``thread1()``'s grace period must -precede the beginning of ``thread2()``'s grace period. This mean that +Here, if ``(r1 == 1)``, then thread0()'s write to ``b`` must happen +before the end of thread1()'s grace period. If in addition +``(r4 == 1)``, then thread3()'s read from ``b`` must happen after +the beginning of thread2()'s grace period. If it is also the case +that ``(r2 == 1)``, then the end of thread1()'s grace period must +precede the beginning of thread2()'s grace period. This mean that the two RCU read-side critical sections cannot overlap, guaranteeing that ``(r3 == 1)``. As a result, the outcome: @@ -1076,8 +1076,8 @@ is captured by the following list of situations: b. Wait-free read-side primitives for real-time use. This focus on read-mostly situations means that RCU must interoperate -with other synchronization primitives. For example, the ``add_gp()`` and -``remove_gp_synchronous()`` examples discussed earlier use RCU to +with other synchronization primitives. For example, the add_gp() and +remove_gp_synchronous() examples discussed earlier use RCU to protect readers and locking to coordinate updaters. However, the need extends much farther, requiring that a variety of synchronization primitives be legal within RCU read-side critical sections, including @@ -1104,11 +1104,11 @@ memory barriers. | sections. | | Note that it *is* legal for a normal RCU read-side critical section | | to conditionally acquire a sleeping locks (as in | -| ``mutex_trylock()``), but only as long as it does not loop | +| mutex_trylock()), but only as long as it does not loop | | indefinitely attempting to conditionally acquire that sleeping locks. | -| The key point is that things like ``mutex_trylock()`` either return | +| The key point is that things like mutex_trylock() either return | | with the mutex held, or return an error indication if the mutex was | -| not immediately available. Either way, ``mutex_trylock()`` returns | +| not immediately available. Either way, mutex_trylock() returns | | immediately without sleeping. | +-----------------------------------------------------------------------+ @@ -1191,57 +1191,57 @@ for those kernels not needing it. The remaining performance requirements are, for the most part, unsurprising. For example, in keeping with RCU's read-side -specialization, ``rcu_dereference()`` should have negligible overhead +specialization, rcu_dereference() should have negligible overhead (for example, suppression of a few minor compiler optimizations). -Similarly, in non-preemptible environments, ``rcu_read_lock()`` and -``rcu_read_unlock()`` should have exactly zero overhead. +Similarly, in non-preemptible environments, rcu_read_lock() and +rcu_read_unlock() should have exactly zero overhead. In preemptible environments, in the case where the RCU read-side critical section was not preempted (as will be the case for the -highest-priority real-time process), ``rcu_read_lock()`` and -``rcu_read_unlock()`` should have minimal overhead. In particular, they +highest-priority real-time process), rcu_read_lock() and +rcu_read_unlock() should have minimal overhead. In particular, they should not contain atomic read-modify-write operations, memory-barrier instructions, preemption disabling, interrupt disabling, or backwards branches. 
However, in the case where the RCU read-side critical section -was preempted, ``rcu_read_unlock()`` may acquire spinlocks and disable +was preempted, rcu_read_unlock() may acquire spinlocks and disable interrupts. This is why it is better to nest an RCU read-side critical section within a preempt-disable region than vice versa, at least in cases where that critical section is short enough to avoid unduly degrading real-time latencies. -The ``synchronize_rcu()`` grace-period-wait primitive is optimized for +The synchronize_rcu() grace-period-wait primitive is optimized for throughput. It may therefore incur several milliseconds of latency in addition to the duration of the longest RCU read-side critical section. On the other hand, multiple concurrent invocations of -``synchronize_rcu()`` are required to use batching optimizations so that +synchronize_rcu() are required to use batching optimizations so that they can be satisfied by a single underlying grace-period-wait operation. For example, in the Linux kernel, it is not unusual for a single grace-period-wait operation to serve more than `1,000 separate invocations `__ -of ``synchronize_rcu()``, thus amortizing the per-invocation overhead +of synchronize_rcu(), thus amortizing the per-invocation overhead down to nearly zero. However, the grace-period optimization is also required to avoid measurable degradation of real-time scheduling and interrupt latencies. -In some cases, the multi-millisecond ``synchronize_rcu()`` latencies are -unacceptable. In these cases, ``synchronize_rcu_expedited()`` may be +In some cases, the multi-millisecond synchronize_rcu() latencies are +unacceptable. In these cases, synchronize_rcu_expedited() may be used instead, reducing the grace-period latency down to a few tens of microseconds on small systems, at least in cases where the RCU read-side critical sections are short. There are currently no special latency -requirements for ``synchronize_rcu_expedited()`` on large systems, but, +requirements for synchronize_rcu_expedited() on large systems, but, consistent with the empirical nature of the RCU specification, that is subject to change. However, there most definitely are scalability -requirements: A storm of ``synchronize_rcu_expedited()`` invocations on +requirements: A storm of synchronize_rcu_expedited() invocations on 4096 CPUs should at least make reasonable forward progress. In return -for its shorter latencies, ``synchronize_rcu_expedited()`` is permitted +for its shorter latencies, synchronize_rcu_expedited() is permitted to impose modest degradation of real-time latency on non-idle online CPUs. Here, “modest” means roughly the same latency degradation as a scheduling-clock interrupt. There are a number of situations where even -``synchronize_rcu_expedited()``'s reduced grace-period latency is -unacceptable. In these situations, the asynchronous ``call_rcu()`` can -be used in place of ``synchronize_rcu()`` as follows: +synchronize_rcu_expedited()'s reduced grace-period latency is +unacceptable. In these situations, the asynchronous call_rcu() can +be used in place of synchronize_rcu() as follows: :: @@ -1275,19 +1275,19 @@ be used in place of ``synchronize_rcu()`` as follows: 28 } A definition of ``struct foo`` is finally needed, and appears on -lines 1-5. The function ``remove_gp_cb()`` is passed to ``call_rcu()`` +lines 1-5. The function remove_gp_cb() is passed to call_rcu() on line 25, and will be invoked after the end of a subsequent grace -period. 
This gets the same effect as ``remove_gp_synchronous()``, but +period. This gets the same effect as remove_gp_synchronous(), but without forcing the updater to wait for a grace period to elapse. The -``call_rcu()`` function may be used in a number of situations where -neither ``synchronize_rcu()`` nor ``synchronize_rcu_expedited()`` would -be legal, including within preempt-disable code, ``local_bh_disable()`` +call_rcu() function may be used in a number of situations where +neither synchronize_rcu() nor synchronize_rcu_expedited() would +be legal, including within preempt-disable code, local_bh_disable() code, interrupt-disable code, and interrupt handlers. However, even -``call_rcu()`` is illegal within NMI handlers and from idle and offline -CPUs. The callback function (``remove_gp_cb()`` in this case) will be +call_rcu() is illegal within NMI handlers and from idle and offline +CPUs. The callback function (remove_gp_cb() in this case) will be executed within softirq (software interrupt) environment within the Linux kernel, either within a real softirq handler or under the -protection of ``local_bh_disable()``. In both the Linux kernel and in +protection of local_bh_disable(). In both the Linux kernel and in userspace, it is bad practice to write an RCU callback function that takes too long. Long-running operations should be relegated to separate threads or (in the Linux kernel) workqueues. @@ -1295,23 +1295,23 @@ threads or (in the Linux kernel) workqueues. +-----------------------------------------------------------------------+ | **Quick Quiz**: | +-----------------------------------------------------------------------+ -| Why does line 19 use ``rcu_access_pointer()``? After all, | -| ``call_rcu()`` on line 25 stores into the structure, which would | +| Why does line 19 use rcu_access_pointer()? After all, | +| call_rcu() on line 25 stores into the structure, which would | | interact badly with concurrent insertions. Doesn't this mean that | -| ``rcu_dereference()`` is required? | +| rcu_dereference() is required? | +-----------------------------------------------------------------------+ | **Answer**: | +-----------------------------------------------------------------------+ | Presumably the ``->gp_lock`` acquired on line 18 excludes any | -| changes, including any insertions that ``rcu_dereference()`` would | +| changes, including any insertions that rcu_dereference() would | | protect against. Therefore, any insertions will be delayed until | | after ``->gp_lock`` is released on line 25, which in turn means that | -| ``rcu_access_pointer()`` suffices. | +| rcu_access_pointer() suffices. | +-----------------------------------------------------------------------+ -However, all that ``remove_gp_cb()`` is doing is invoking ``kfree()`` on +However, all that remove_gp_cb() is doing is invoking kfree() on the data element. This is a common idiom, and is supported by -``kfree_rcu()``, which allows “fire and forget” operation as shown +kfree_rcu(), which allows “fire and forget” operation as shown below: :: @@ -1338,20 +1338,20 @@ below: 20 return true; 21 } -Note that ``remove_gp_faf()`` simply invokes ``kfree_rcu()`` and +Note that remove_gp_faf() simply invokes kfree_rcu() and proceeds, without any need to pay any further attention to the -subsequent grace period and ``kfree()``. It is permissible to invoke -``kfree_rcu()`` from the same environments as for ``call_rcu()``. 
-Interestingly enough, DYNIX/ptx had the equivalents of ``call_rcu()`` -and ``kfree_rcu()``, but not ``synchronize_rcu()``. This was due to the +subsequent grace period and kfree(). It is permissible to invoke +kfree_rcu() from the same environments as for call_rcu(). +Interestingly enough, DYNIX/ptx had the equivalents of call_rcu() +and kfree_rcu(), but not synchronize_rcu(). This was due to the fact that RCU was not heavily used within DYNIX/ptx, so the very few -places that needed something like ``synchronize_rcu()`` simply +places that needed something like synchronize_rcu() simply open-coded it. +-----------------------------------------------------------------------+ | **Quick Quiz**: | +-----------------------------------------------------------------------+ -| Earlier it was claimed that ``call_rcu()`` and ``kfree_rcu()`` | +| Earlier it was claimed that call_rcu() and kfree_rcu() | | allowed updaters to avoid being blocked by readers. But how can that | | be correct, given that the invocation of the callback and the freeing | | of the memory (respectively) must still wait for a grace period to | @@ -1363,16 +1363,16 @@ open-coded it. | definition would say that updates in garbage-collected languages | | cannot complete until the next time the garbage collector runs, which | | does not seem at all reasonable. The key point is that in most cases, | -| an updater using either ``call_rcu()`` or ``kfree_rcu()`` can proceed | -| to the next update as soon as it has invoked ``call_rcu()`` or | -| ``kfree_rcu()``, without having to wait for a subsequent grace | +| an updater using either call_rcu() or kfree_rcu() can proceed | +| to the next update as soon as it has invoked call_rcu() or | +| kfree_rcu(), without having to wait for a subsequent grace | | period. | +-----------------------------------------------------------------------+ But what if the updater must wait for the completion of code to be executed after the end of the grace period, but has other tasks that can be carried out in the meantime? The polling-style -``get_state_synchronize_rcu()`` and ``cond_synchronize_rcu()`` functions +get_state_synchronize_rcu() and cond_synchronize_rcu() functions may be used for this purpose, as shown below: :: @@ -1397,11 +1397,11 @@ may be used for this purpose, as shown below: 18 return true; 19 } -On line 14, ``get_state_synchronize_rcu()`` obtains a “cookie” from RCU, +On line 14, get_state_synchronize_rcu() obtains a “cookie” from RCU, then line 15 carries out other tasks, and finally, line 16 returns immediately if a grace period has elapsed in the meantime, but otherwise waits as required. The need for ``get_state_synchronize_rcu`` and -``cond_synchronize_rcu()`` has appeared quite recently, so it is too +cond_synchronize_rcu() has appeared quite recently, so it is too early to tell whether they will stand the test of time. RCU thus provides a range of tools to allow updaters to strike the @@ -1421,8 +1421,8 @@ example, an infinite loop in an RCU read-side critical section must by definition prevent later grace periods from ever completing. For a more involved example, consider a 64-CPU system built with ``CONFIG_RCU_NOCB_CPU=y`` and booted with ``rcu_nocbs=1-63``, where -CPUs 1 through 63 spin in tight loops that invoke ``call_rcu()``. Even -if these tight loops also contain calls to ``cond_resched()`` (thus +CPUs 1 through 63 spin in tight loops that invoke call_rcu(). 
Even +if these tight loops also contain calls to cond_resched() (thus allowing grace periods to complete), CPU 0 simply will not be able to invoke callbacks as fast as the other 63 CPUs can register them, at least not until the system runs out of memory. In both of these @@ -1435,21 +1435,21 @@ RCU takes the following steps to encourage timely completion of grace periods: #. If a grace period fails to complete within 100 milliseconds, RCU - causes future invocations of ``cond_resched()`` on the holdout CPUs + causes future invocations of cond_resched() on the holdout CPUs to provide an RCU quiescent state. RCU also causes those CPUs' - ``need_resched()`` invocations to return ``true``, but only after the + need_resched() invocations to return ``true``, but only after the corresponding CPU's next scheduling-clock. #. CPUs mentioned in the ``nohz_full`` kernel boot parameter can run indefinitely in the kernel without scheduling-clock interrupts, which - defeats the above ``need_resched()`` strategem. RCU will therefore - invoke ``resched_cpu()`` on any ``nohz_full`` CPUs still holding out + defeats the above need_resched() strategem. RCU will therefore + invoke resched_cpu() on any ``nohz_full`` CPUs still holding out after 109 milliseconds. #. In kernels built with ``CONFIG_RCU_BOOST=y``, if a given task that has been preempted within an RCU read-side critical section is holding out for more than 500 milliseconds, RCU will resort to priority boosting. #. If a CPU is still holding out 10 seconds into the grace period, RCU - will invoke ``resched_cpu()`` on it regardless of its ``nohz_full`` + will invoke resched_cpu() on it regardless of its ``nohz_full`` state. The above values are defaults for systems running with ``HZ=1000``. They @@ -1460,7 +1460,7 @@ caution when changing them. Note that these forward-progress measures are provided only for RCU, not for `SRCU <#Sleepable%20RCU>`__ or `Tasks RCU <#Tasks%20RCU>`__. -RCU takes the following steps in ``call_rcu()`` to encourage timely +RCU takes the following steps in call_rcu() to encourage timely invocation of callbacks when any given non-\ ``rcu_nocbs`` CPU has 10,000 callbacks, or has 10,000 more callbacks than it had the last time encouragement was provided: @@ -1481,8 +1481,8 @@ RCU, not for `SRCU <#Sleepable%20RCU>`__ or `Tasks RCU <#Tasks%20RCU>`__. Even for RCU, callback-invocation forward progress for ``rcu_nocbs`` CPUs is much less well-developed, in part because workloads benefiting from ``rcu_nocbs`` CPUs tend to invoke -``call_rcu()`` relatively infrequently. If workloads emerge that need -both ``rcu_nocbs`` CPUs and high ``call_rcu()`` invocation rates, then +call_rcu() relatively infrequently. If workloads emerge that need +both ``rcu_nocbs`` CPUs and high call_rcu() invocation rates, then additional forward-progress work will be required. Composability @@ -1496,11 +1496,11 @@ in fact may be nested arbitrarily deeply. In practice, as with all real-world implementations of composable constructs, there are limitations. -Implementations of RCU for which ``rcu_read_lock()`` and -``rcu_read_unlock()`` generate no code, such as Linux-kernel RCU when +Implementations of RCU for which rcu_read_lock() and +rcu_read_unlock() generate no code, such as Linux-kernel RCU when ``CONFIG_PREEMPT=n``, can be nested arbitrarily deeply. After all, there is no overhead. 
Except that if all these instances of -``rcu_read_lock()`` and ``rcu_read_unlock()`` are visible to the +rcu_read_lock() and rcu_read_unlock() are visible to the compiler, compilation will eventually fail due to exhausting memory, mass storage, or user patience, whichever comes first. If the nesting is not visible to the compiler, as is the case with mutually recursive @@ -1558,11 +1558,11 @@ argue that such workloads should instead use something other than RCU, the fact remains that RCU must handle such workloads gracefully. This requirement is another factor driving batching of grace periods, but it is also the driving force behind the checks for large numbers of queued -RCU callbacks in the ``call_rcu()`` code path. Finally, high update +RCU callbacks in the call_rcu() code path. Finally, high update rates should not delay RCU read-side critical sections, although some small read-side delays can occur when using -``synchronize_rcu_expedited()``, courtesy of this function's use of -``smp_call_function_single()``. +synchronize_rcu_expedited(), courtesy of this function's use of +smp_call_function_single(). Although all three of these corner cases were understood in the early 1990s, a simple user-level test consisting of ``close(open(path))`` in a @@ -1583,45 +1583,45 @@ Software-Engineering Requirements Between Murphy's Law and “To err is human”, it is necessary to guard against mishaps and misuse: -#. It is all too easy to forget to use ``rcu_read_lock()`` everywhere +#. It is all too easy to forget to use rcu_read_lock() everywhere that it is needed, so kernels built with ``CONFIG_PROVE_RCU=y`` will - splat if ``rcu_dereference()`` is used outside of an RCU read-side + splat if rcu_dereference() is used outside of an RCU read-side critical section. Update-side code can use - ``rcu_dereference_protected()``, which takes a `lockdep + rcu_dereference_protected(), which takes a `lockdep expression `__ to indicate what is providing the protection. If the indicated protection is not provided, a lockdep splat is emitted. Code shared between readers and updaters can use - ``rcu_dereference_check()``, which also takes a lockdep expression, - and emits a lockdep splat if neither ``rcu_read_lock()`` nor the + rcu_dereference_check(), which also takes a lockdep expression, + and emits a lockdep splat if neither rcu_read_lock() nor the indicated protection is in place. In addition, - ``rcu_dereference_raw()`` is used in those (hopefully rare) cases + rcu_dereference_raw() is used in those (hopefully rare) cases where the required protection cannot be easily described. Finally, - ``rcu_read_lock_held()`` is provided to allow a function to verify + rcu_read_lock_held() is provided to allow a function to verify that it has been invoked within an RCU read-side critical section. I was made aware of this set of requirements shortly after Thomas Gleixner audited a number of RCU uses. #. A given function might wish to check for RCU-related preconditions upon entry, before using any other RCU API. The - ``rcu_lockdep_assert()`` does this job, asserting the expression in + rcu_lockdep_assert() does this job, asserting the expression in kernels having lockdep enabled and doing nothing otherwise. -#. It is also easy to forget to use ``rcu_assign_pointer()`` and - ``rcu_dereference()``, perhaps (incorrectly) substituting a simple +#. It is also easy to forget to use rcu_assign_pointer() and + rcu_dereference(), perhaps (incorrectly) substituting a simple assignment. 
To catch this sort of error, a given RCU-protected pointer may be tagged with ``__rcu``, after which sparse will complain about simple-assignment accesses to that pointer. Arnd Bergmann made me aware of this requirement, and also supplied the needed `patch series `__. #. Kernels built with ``CONFIG_DEBUG_OBJECTS_RCU_HEAD=y`` will splat if - a data element is passed to ``call_rcu()`` twice in a row, without a + a data element is passed to call_rcu() twice in a row, without a grace period in between. (This error is similar to a double free.) The corresponding ``rcu_head`` structures that are dynamically allocated are automatically tracked, but ``rcu_head`` structures allocated on the stack must be initialized with - ``init_rcu_head_on_stack()`` and cleaned up with - ``destroy_rcu_head_on_stack()``. Similarly, statically allocated + init_rcu_head_on_stack() and cleaned up with + destroy_rcu_head_on_stack(). Similarly, statically allocated non-stack ``rcu_head`` structures must be initialized with - ``init_rcu_head()`` and cleaned up with ``destroy_rcu_head()``. + init_rcu_head() and cleaned up with destroy_rcu_head(). Mathieu Desnoyers made me aware of this requirement, and also supplied the needed `patch `__. @@ -1638,9 +1638,9 @@ against mishaps and misuse: ``rcupdate.rcu_cpu_stall_suppress`` to suppress the splats. This kernel parameter may also be set via ``sysfs``. Furthermore, RCU CPU stall warnings are counter-productive during sysrq dumps and during - panics. RCU therefore supplies the ``rcu_sysrq_start()`` and - ``rcu_sysrq_end()`` API members to be called before and after long - sysrq dumps. RCU also supplies the ``rcu_panic()`` notifier that is + panics. RCU therefore supplies the rcu_sysrq_start() and + rcu_sysrq_end() API members to be called before and after long + sysrq dumps. RCU also supplies the rcu_panic() notifier that is automatically invoked at the beginning of a panic to suppress further RCU CPU stall warnings. @@ -1656,7 +1656,7 @@ against mishaps and misuse: synchronization mechanism, for example, reference counting. #. In kernels built with ``CONFIG_RCU_TRACE=y``, RCU-related information is provided via event tracing. -#. Open-coded use of ``rcu_assign_pointer()`` and ``rcu_dereference()`` +#. Open-coded use of rcu_assign_pointer() and rcu_dereference() to create typical linked data structures can be surprisingly error-prone. Therefore, RCU-protected `linked lists `__ and, @@ -1665,11 +1665,11 @@ against mishaps and misuse: other special-purpose RCU-protected data structures are available in the Linux kernel and the userspace RCU library. #. Some linked structures are created at compile time, but still require - ``__rcu`` checking. The ``RCU_POINTER_INITIALIZER()`` macro serves + ``__rcu`` checking. The RCU_POINTER_INITIALIZER() macro serves this purpose. -#. It is not necessary to use ``rcu_assign_pointer()`` when creating +#. It is not necessary to use rcu_assign_pointer() when creating linked structures that are to be published via a single external - pointer. The ``RCU_INIT_POINTER()`` macro is provided for this task + pointer. The RCU_INIT_POINTER() macro is provided for this task and also for assigning ``NULL`` pointers at runtime. This not a hard-and-fast list: RCU's diagnostic capabilities will @@ -1743,17 +1743,17 @@ Early Boot ~~~~~~~~~~ The Linux kernel's boot sequence is an interesting process, and RCU is -used early, even before ``rcu_init()`` is invoked. In fact, a number of +used early, even before rcu_init() is invoked. 
In fact, a number of RCU's primitives can be used as soon as the initial task's ``task_struct`` is available and the boot CPU's per-CPU variables are -set up. The read-side primitives (``rcu_read_lock()``, -``rcu_read_unlock()``, ``rcu_dereference()``, and -``rcu_access_pointer()``) will operate normally very early on, as will -``rcu_assign_pointer()``. +set up. The read-side primitives (rcu_read_lock(), +rcu_read_unlock(), rcu_dereference(), and +rcu_access_pointer()) will operate normally very early on, as will +rcu_assign_pointer(). -Although ``call_rcu()`` may be invoked at any time during boot, +Although call_rcu() may be invoked at any time during boot, callbacks are not guaranteed to be invoked until after all of RCU's -kthreads have been spawned, which occurs at ``early_initcall()`` time. +kthreads have been spawned, which occurs at early_initcall() time. This delay in callback invocation is due to the fact that RCU does not invoke callbacks until it is fully initialized, and this full initialization cannot occur until after the scheduler has initialized @@ -1762,22 +1762,22 @@ it would be possible to invoke callbacks earlier, however, this is not a panacea because there would be severe restrictions on what operations those callbacks could invoke. -Perhaps surprisingly, ``synchronize_rcu()`` and -``synchronize_rcu_expedited()``, will operate normally during very early +Perhaps surprisingly, synchronize_rcu() and +synchronize_rcu_expedited(), will operate normally during very early boot, the reason being that there is only one CPU and preemption is -disabled. This means that the call ``synchronize_rcu()`` (or friends) +disabled. This means that the call synchronize_rcu() (or friends) itself is a quiescent state and thus a grace period, so the early-boot implementation can be a no-op. However, once the scheduler has spawned its first kthread, this early -boot trick fails for ``synchronize_rcu()`` (as well as for -``synchronize_rcu_expedited()``) in ``CONFIG_PREEMPT=y`` kernels. The +boot trick fails for synchronize_rcu() (as well as for +synchronize_rcu_expedited()) in ``CONFIG_PREEMPT=y`` kernels. The reason is that an RCU read-side critical section might be preempted, -which means that a subsequent ``synchronize_rcu()`` really does have to +which means that a subsequent synchronize_rcu() really does have to wait for something, as opposed to simply returning immediately. -Unfortunately, ``synchronize_rcu()`` can't do this until all of its +Unfortunately, synchronize_rcu() can't do this until all of its kthreads are spawned, which doesn't happen until some time during -``early_initcalls()`` time. But this is no excuse: RCU is nevertheless +early_initcalls() time. But this is no excuse: RCU is nevertheless required to correctly handle synchronous grace periods during this time period. Once all of its kthreads are up and running, RCU starts running normally. @@ -1820,7 +1820,7 @@ Interrupts and NMIs The Linux kernel has interrupts, and RCU read-side critical sections are legal within interrupt handlers and within interrupt-disabled regions of -code, as are invocations of ``call_rcu()``. +code, as are invocations of call_rcu(). Some Linux-kernel architectures can enter an interrupt handler from non-idle process context, and then just never leave it, instead @@ -1832,7 +1832,7 @@ way during a rewrite of RCU's dyntick-idle code. The Linux kernel has non-maskable interrupts (NMIs), and RCU read-side critical sections are legal within NMI handlers. 
Thankfully, RCU -update-side primitives, including ``call_rcu()``, are prohibited within +update-side primitives, including call_rcu(), are prohibited within NMI handlers. The name notwithstanding, some Linux-kernel architectures can have @@ -1844,10 +1844,10 @@ that meets this requirement. Furthermore, NMI handlers can be interrupted by what appear to RCU to be normal interrupts. One way that this can happen is for code that -directly invokes ``rcu_irq_enter()`` and ``rcu_irq_exit()`` to be called +directly invokes rcu_irq_enter() and rcu_irq_exit() to be called from an NMI handler. This astonishing fact of life prompted the current -code structure, which has ``rcu_irq_enter()`` invoking -``rcu_nmi_enter()`` and ``rcu_irq_exit()`` invoking ``rcu_nmi_exit()``. +code structure, which has rcu_irq_enter() invoking +rcu_nmi_enter() and rcu_irq_exit() invoking rcu_nmi_exit(). And yes, I also learned of this requirement the hard way. Loadable Modules @@ -1857,45 +1857,45 @@ The Linux kernel has loadable modules, and these modules can also be unloaded. After a given module has been unloaded, any attempt to call one of its functions results in a segmentation fault. The module-unload functions must therefore cancel any delayed calls to loadable-module -functions, for example, any outstanding ``mod_timer()`` must be dealt -with via ``del_timer_sync()`` or similar. +functions, for example, any outstanding mod_timer() must be dealt +with via del_timer_sync() or similar. Unfortunately, there is no way to cancel an RCU callback; once you -invoke ``call_rcu()``, the callback function is eventually going to be +invoke call_rcu(), the callback function is eventually going to be invoked, unless the system goes down first. Because it is normally considered socially irresponsible to crash the system in response to a module unload request, we need some other way to deal with in-flight RCU callbacks. -RCU therefore provides ``rcu_barrier()``, which waits until all +RCU therefore provides rcu_barrier(), which waits until all in-flight RCU callbacks have been invoked. If a module uses -``call_rcu()``, its exit function should therefore prevent any future -invocation of ``call_rcu()``, then invoke ``rcu_barrier()``. In theory, -the underlying module-unload code could invoke ``rcu_barrier()`` +call_rcu(), its exit function should therefore prevent any future +invocation of call_rcu(), then invoke rcu_barrier(). In theory, +the underlying module-unload code could invoke rcu_barrier() unconditionally, but in practice this would incur unacceptable latencies. Nikita Danilov noted this requirement for an analogous filesystem-unmount situation, and Dipankar Sarma incorporated -``rcu_barrier()`` into RCU. The need for ``rcu_barrier()`` for module +rcu_barrier() into RCU. The need for rcu_barrier() for module unloading became apparent later. .. important:: - The ``rcu_barrier()`` function is not, repeat, + The rcu_barrier() function is not, repeat, *not*, obligated to wait for a grace period. It is instead only required to wait for RCU callbacks that have already been posted. Therefore, if there are no RCU callbacks posted anywhere in the system, - ``rcu_barrier()`` is within its rights to return immediately. Even if - there are callbacks posted, ``rcu_barrier()`` does not necessarily need + rcu_barrier() is within its rights to return immediately. Even if + there are callbacks posted, rcu_barrier() does not necessarily need to wait for a grace period. 
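For example, a module's exit function might pair these two steps roughly as
follows. This is only a sketch: the foo_stop_posting_callbacks() helper and
the foo module are hypothetical, and a real module must use whatever mechanism
it actually has for preventing further call_rcu() invocations.

::

    static void __exit foo_exit(void)
    {
            /* Hypothetical helper: stop anything that might post new callbacks. */
            foo_stop_posting_callbacks();

            /* Wait for every already-posted callback to be invoked. */
            rcu_barrier();

            /* Only now is it safe for the module's code and data to go away. */
    }
    module_exit(foo_exit);
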
+-----------------------------------------------------------------------+ | **Quick Quiz**: | +-----------------------------------------------------------------------+ | Wait a minute! Each RCU callbacks must wait for a grace period to | -| complete, and ``rcu_barrier()`` must wait for each pre-existing | -| callback to be invoked. Doesn't ``rcu_barrier()`` therefore need to | +| complete, and rcu_barrier() must wait for each pre-existing | +| callback to be invoked. Doesn't rcu_barrier() therefore need to | | wait for a full grace period if there is even one callback posted | | anywhere in the system? | +-----------------------------------------------------------------------+ @@ -1904,14 +1904,14 @@ unloading became apparent later. | Absolutely not!!! | | Yes, each RCU callbacks must wait for a grace period to complete, but | | it might well be partly (or even completely) finished waiting by the | -| time ``rcu_barrier()`` is invoked. In that case, ``rcu_barrier()`` | +| time rcu_barrier() is invoked. In that case, rcu_barrier() | | need only wait for the remaining portion of the grace period to | | elapse. So even if there are quite a few callbacks posted, | -| ``rcu_barrier()`` might well return quite quickly. | +| rcu_barrier() might well return quite quickly. | | | | So if you need to wait for a grace period as well as for all | | pre-existing callbacks, you will need to invoke both | -| ``synchronize_rcu()`` and ``rcu_barrier()``. If latency is a concern, | +| synchronize_rcu() and rcu_barrier(). If latency is a concern, | | you can always use workqueues to invoke them concurrently. | +-----------------------------------------------------------------------+ @@ -1929,18 +1929,18 @@ The Linux-kernel CPU-hotplug implementation has notifiers that are used to allow the various kernel subsystems (including RCU) to respond appropriately to a given CPU-hotplug operation. Most RCU operations may be invoked from CPU-hotplug notifiers, including even synchronous -grace-period operations such as (``synchronize_rcu()`` and -``synchronize_rcu_expedited()``). However, these synchronous operations +grace-period operations such as (synchronize_rcu() and +synchronize_rcu_expedited()). However, these synchronous operations do block and therefore cannot be invoked from notifiers that execute via -``stop_machine()``, specifically those between the ``CPUHP_AP_OFFLINE`` +stop_machine(), specifically those between the ``CPUHP_AP_OFFLINE`` and ``CPUHP_AP_ONLINE`` states. -In addition, all-callback-wait operations such as ``rcu_barrier()`` may +In addition, all-callback-wait operations such as rcu_barrier() may not be invoked from any CPU-hotplug notifier. This restriction is due to the fact that there are phases of CPU-hotplug operations where the outgoing CPU's callbacks will not be invoked until after the CPU-hotplug operation ends, which could also result in deadlock. Furthermore, -``rcu_barrier()`` blocks CPU-hotplug operations during its execution, +rcu_barrier() blocks CPU-hotplug operations during its execution, which results in another type of deadlock when invoked from a CPU-hotplug notifier. @@ -1955,12 +1955,12 @@ if offline CPUs block an RCU grace period for too long. An offline CPU's quiescent state will be reported either: -1. As the CPU goes offline using RCU's hotplug notifier (``rcu_report_dead()``). -2. When grace period initialization (``rcu_gp_init()``) detects a +1. As the CPU goes offline using RCU's hotplug notifier (rcu_report_dead()). +2. 
When grace period initialization (rcu_gp_init()) detects a race either with CPU offlining or with a task unblocking on a leaf ``rcu_node`` structure whose CPUs are all offline. -The CPU-online path (``rcu_cpu_starting()``) should never need to report +The CPU-online path (rcu_cpu_starting()) should never need to report a quiescent state for an offline CPU. However, as a debugging measure, it does emit a warning if a quiescent state was not already reported for that CPU. @@ -1984,11 +1984,11 @@ room for further improvement. There is no longer any prohibition against holding any of scheduler's runqueue or priority-inheritance spinlocks across an -``rcu_read_unlock()``, even if interrupts and preemption were enabled +rcu_read_unlock(), even if interrupts and preemption were enabled somewhere within the corresponding RCU read-side critical section. -Therefore, it is now perfectly legal to execute ``rcu_read_lock()`` +Therefore, it is now perfectly legal to execute rcu_read_lock() with preemption enabled, acquire one of the scheduler locks, and hold -that lock across the matching ``rcu_read_unlock()``. +that lock across the matching rcu_read_unlock(). Similarly, the RCU flavor consolidation has removed the need for negative nesting. The fact that interrupt-disabled regions of code act as RCU @@ -1999,7 +1999,7 @@ Tracing and RCU ~~~~~~~~~~~~~~~ It is possible to use tracing on RCU code, but tracing itself uses RCU. -For this reason, ``rcu_dereference_raw_check()`` is provided for use +For this reason, rcu_dereference_raw_check() is provided for use by tracing, which avoids the destructive recursion that could otherwise ensue. This API is also used by virtualization in some architectures, where RCU readers execute in environments in which tracing cannot be @@ -2010,12 +2010,12 @@ Accesses to User Memory and RCU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The kernel needs to access user-space memory, for example, to access data -referenced by system-call parameters. The ``get_user()`` macro does this job. +referenced by system-call parameters. The get_user() macro does this job. However, user-space memory might well be paged out, which means that -``get_user()`` might well page-fault and thus block while waiting for the +get_user() might well page-fault and thus block while waiting for the resulting I/O to complete. It would be a very bad thing for the compiler to -reorder a ``get_user()`` invocation into an RCU read-side critical section. +reorder a get_user() invocation into an RCU read-side critical section. For example, suppose that the source code looked like this: @@ -2041,22 +2041,22 @@ the following: 6 do_something_with(v, user_v); If the compiler did make this transformation in a ``CONFIG_PREEMPT=n`` kernel -build, and if ``get_user()`` did page fault, the result would be a quiescent +build, and if get_user() did page fault, the result would be a quiescent state in the middle of an RCU read-side critical section. This misplaced quiescent state could result in line 4 being a use-after-free access, which could be bad for your kernel's actuarial statistics. Similar examples -can be constructed with the call to ``get_user()`` preceding the -``rcu_read_lock()``. +can be constructed with the call to get_user() preceding the +rcu_read_lock(). -Unfortunately, ``get_user()`` doesn't have any particular ordering properties, +Unfortunately, get_user() doesn't have any particular ordering properties, and in some architectures the underlying ``asm`` isn't even marked ``volatile``. 
And even if it was marked ``volatile``, the above access to ``p->value`` is not volatile, so the compiler would not have any reason to keep those two accesses in order. -Therefore, the Linux-kernel definitions of ``rcu_read_lock()`` and -``rcu_read_unlock()`` must act as compiler barriers, at least for outermost -instances of ``rcu_read_lock()`` and ``rcu_read_unlock()`` within a nested set +Therefore, the Linux-kernel definitions of rcu_read_lock() and +rcu_read_unlock() must act as compiler barriers, at least for outermost +instances of rcu_read_lock() and rcu_read_unlock() within a nested set of RCU read-side critical sections. Energy Efficiency @@ -2071,26 +2071,26 @@ call. Because RCU avoids interrupting idle CPUs, it is illegal to execute an RCU read-side critical section on an idle CPU. (Kernels built with -``CONFIG_PROVE_RCU=y`` will splat if you try it.) The ``RCU_NONIDLE()`` +``CONFIG_PROVE_RCU=y`` will splat if you try it.) The RCU_NONIDLE() macro and ``_rcuidle`` event tracing is provided to work around this -restriction. In addition, ``rcu_is_watching()`` may be used to test +restriction. In addition, rcu_is_watching() may be used to test whether or not it is currently legal to run RCU read-side critical sections on this CPU. I learned of the need for diagnostics on the one -hand and ``RCU_NONIDLE()`` on the other while inspecting idle-loop code. +hand and RCU_NONIDLE() on the other while inspecting idle-loop code. Steven Rostedt supplied ``_rcuidle`` event tracing, which is used quite heavily in the idle loop. However, there are some restrictions on the -code placed within ``RCU_NONIDLE()``: +code placed within RCU_NONIDLE(): #. Blocking is prohibited. In practice, this is not a serious restriction given that idle tasks are prohibited from blocking to begin with. -#. Although nesting ``RCU_NONIDLE()`` is permitted, they cannot nest +#. Although nesting RCU_NONIDLE() is permitted, they cannot nest indefinitely deeply. However, given that they can be nested on the order of a million deep, even on 32-bit systems, this should not be a serious restriction. This nesting limit would probably be reached long after the compiler OOMed or the stack overflowed. -#. Any code path that enters ``RCU_NONIDLE()`` must sequence out of that - same ``RCU_NONIDLE()``. For example, the following is grossly +#. Any code path that enters RCU_NONIDLE() must sequence out of that + same RCU_NONIDLE(). For example, the following is grossly illegal: :: @@ -2103,7 +2103,7 @@ code placed within ``RCU_NONIDLE()``: It is just as illegal to transfer control into the middle of - ``RCU_NONIDLE()``'s argument. Yes, in theory, you could transfer in + RCU_NONIDLE()'s argument. Yes, in theory, you could transfer in as long as you also transferred out, but in practice you could also expect to get sharply worded review comments. @@ -2195,9 +2195,9 @@ scheduling-clock interrupt be enabled when RCU needs it to be: sections, and RCU believes this CPU to be idle, no problem. This sort of thing is used by some architectures for light-weight exception handlers, which can then avoid the overhead of - ``rcu_irq_enter()`` and ``rcu_irq_exit()`` at exception entry and + rcu_irq_enter() and rcu_irq_exit() at exception entry and exit, respectively. Some go further and avoid the entireties of - ``irq_enter()`` and ``irq_exit()``. + irq_enter() and irq_exit(). 
Just make very sure you are running some of your tests with ``CONFIG_PROVE_RCU=y``, just in case one of your code paths was in fact joking about not doing RCU read-side critical sections. @@ -2221,7 +2221,7 @@ scheduling-clock interrupt be enabled when RCU needs it to be: | **Quick Quiz**: | +-----------------------------------------------------------------------+ | But what if my driver has a hardware interrupt handler that can run | -| for many seconds? I cannot invoke ``schedule()`` from an hardware | +| for many seconds? I cannot invoke schedule() from an hardware | | interrupt handler, after all! | +-----------------------------------------------------------------------+ | **Answer**: | @@ -2243,8 +2243,8 @@ Memory Efficiency Although small-memory non-realtime systems can simply use Tiny RCU, code size is only one aspect of memory efficiency. Another aspect is the size -of the ``rcu_head`` structure used by ``call_rcu()`` and -``kfree_rcu()``. Although this structure contains nothing more than a +of the ``rcu_head`` structure used by call_rcu() and +kfree_rcu(). Although this structure contains nothing more than a pair of pointers, it does appear in many RCU-protected data structures, including some that are size critical. The ``page`` structure is a case in point, as evidenced by the many occurrences of the ``union`` keyword @@ -2254,7 +2254,7 @@ This need for memory efficiency is one reason that RCU uses hand-crafted singly linked lists to track the ``rcu_head`` structures that are waiting for a grace period to elapse. It is also the reason why ``rcu_head`` structures do not contain debug information, such as fields -tracking the file and line of the ``call_rcu()`` or ``kfree_rcu()`` that +tracking the file and line of the call_rcu() or kfree_rcu() that posted them. Although this information might appear in debug-only kernel builds at some point, in the meantime, the ``->func`` field will often provide the needed debug information. @@ -2268,14 +2268,14 @@ conditions next`` field. RCU makes this guarantee as long as ``call_rcu()`` is -used to post the callback, as opposed to ``kfree_rcu()`` or some future -“lazy” variant of ``call_rcu()`` that might one day be created for +``->next`` field. RCU makes this guarantee as long as call_rcu() is +used to post the callback, as opposed to kfree_rcu() or some future +“lazy” variant of call_rcu() that might one day be created for energy-efficiency purposes. That said, there are limits. RCU requires that the ``rcu_head`` structure be aligned to a two-byte boundary, and passing a misaligned -``rcu_head`` structure to one of the ``call_rcu()`` family of functions +``rcu_head`` structure to one of the call_rcu() family of functions will result in a splat. It is therefore necessary to exercise caution when packing structures containing fields of type ``rcu_head``. Why not a four-byte or even eight-byte alignment requirement? Because the m68k @@ -2299,7 +2299,7 @@ hot code paths in performance-critical portions of the Linux kernel's networking, security, virtualization, and scheduling code paths. RCU must therefore use efficient implementations, especially in its read-side primitives. To that end, it would be good if preemptible RCU's -implementation of ``rcu_read_lock()`` could be inlined, however, doing +implementation of rcu_read_lock() could be inlined, however, doing this requires resolving ``#include`` issues with the ``task_struct`` structure. @@ -2312,8 +2312,8 @@ on the ``rcu_node`` structure. 
RCU is required to tolerate all CPUs continuously invoking any combination of RCU's runtime primitives with minimal per-operation overhead. In fact, in many cases, increasing load must *decrease* the per-operation overhead, witness the batching -optimizations for ``synchronize_rcu()``, ``call_rcu()``, -``synchronize_rcu_expedited()``, and ``rcu_barrier()``. As a general +optimizations for synchronize_rcu(), call_rcu(), +synchronize_rcu_expedited(), and rcu_barrier(). As a general rule, RCU must cheerfully accept whatever the rest of the Linux kernel decides to throw at it. @@ -2346,7 +2346,7 @@ number of race conditions. RCU must avoid degrading real-time response for CPU-bound threads, whether executing in usermode (which is one use case for ``CONFIG_NO_HZ_FULL=y``) or in the kernel. That said, CPU-bound loops in -the kernel must execute ``cond_resched()`` at least once per few tens of +the kernel must execute cond_resched() at least once per few tens of milliseconds in order to avoid receiving an IPI from RCU. Finally, RCU's status as a synchronization primitive means that any RCU @@ -2412,7 +2412,7 @@ grace periods from ever ending. The result was an out-of-memory condition and a system hang. The solution was the creation of RCU-bh, which does -``local_bh_disable()`` across its read-side critical sections, and which +local_bh_disable() across its read-side critical sections, and which uses the transition from one type of softirq processing to another as a quiescent state in addition to context switch, idle, user mode, and offline. This means that RCU-bh grace periods can complete even when @@ -2420,29 +2420,29 @@ some of the CPUs execute in softirq indefinitely, thus allowing algorithms based on RCU-bh to withstand network-based denial-of-service attacks. -Because ``rcu_read_lock_bh()`` and ``rcu_read_unlock_bh()`` disable and +Because rcu_read_lock_bh() and rcu_read_unlock_bh() disable and re-enable softirq handlers, any attempt to start a softirq handlers during the RCU-bh read-side critical section will be deferred. In this -case, ``rcu_read_unlock_bh()`` will invoke softirq processing, which can +case, rcu_read_unlock_bh() will invoke softirq processing, which can take considerable time. One can of course argue that this softirq overhead should be associated with the code following the RCU-bh -read-side critical section rather than ``rcu_read_unlock_bh()``, but the +read-side critical section rather than rcu_read_unlock_bh(), but the fact is that most profiling tools cannot be expected to make this sort of fine distinction. For example, suppose that a three-millisecond-long RCU-bh read-side critical section executes during a time of heavy networking load. There will very likely be an attempt to invoke at least one softirq handler during that three milliseconds, but any such invocation will be delayed until the time of the -``rcu_read_unlock_bh()``. This can of course make it appear at first -glance as if ``rcu_read_unlock_bh()`` was executing very slowly. +rcu_read_unlock_bh(). This can of course make it appear at first +glance as if rcu_read_unlock_bh() was executing very slowly. The `RCU-bh API `__ -includes ``rcu_read_lock_bh()``, ``rcu_read_unlock_bh()``, -``rcu_dereference_bh()``, ``rcu_dereference_bh_check()``, -``synchronize_rcu_bh()``, ``synchronize_rcu_bh_expedited()``, -``call_rcu_bh()``, ``rcu_barrier_bh()``, and -``rcu_read_lock_bh_held()``. 
However, the update-side APIs are now +includes rcu_read_lock_bh(), rcu_read_unlock_bh(), +rcu_dereference_bh(), rcu_dereference_bh_check(), +synchronize_rcu_bh(), synchronize_rcu_bh_expedited(), +call_rcu_bh(), rcu_barrier_bh(), and +rcu_read_lock_bh_held(). However, the update-side APIs are now simple wrappers for other RCU flavors, namely RCU-sched in CONFIG_PREEMPT=n kernels and RCU-preempt otherwise. @@ -2467,27 +2467,27 @@ RCU-sched APIs have identical implementations, while kernels built with ``CONFIG_PREEMPT=y`` provide a separate implementation for each. Note well that in ``CONFIG_PREEMPT=y`` kernels, -``rcu_read_lock_sched()`` and ``rcu_read_unlock_sched()`` disable and +rcu_read_lock_sched() and rcu_read_unlock_sched() disable and re-enable preemption, respectively. This means that if there was a preemption attempt during the RCU-sched read-side critical section, -``rcu_read_unlock_sched()`` will enter the scheduler, with all the -latency and overhead entailed. Just as with ``rcu_read_unlock_bh()``, -this can make it look as if ``rcu_read_unlock_sched()`` was executing +rcu_read_unlock_sched() will enter the scheduler, with all the +latency and overhead entailed. Just as with rcu_read_unlock_bh(), +this can make it look as if rcu_read_unlock_sched() was executing very slowly. However, the highest-priority task won't be preempted, so -that task will enjoy low-overhead ``rcu_read_unlock_sched()`` +that task will enjoy low-overhead rcu_read_unlock_sched() invocations. The `RCU-sched API `__ -includes ``rcu_read_lock_sched()``, ``rcu_read_unlock_sched()``, -``rcu_read_lock_sched_notrace()``, ``rcu_read_unlock_sched_notrace()``, -``rcu_dereference_sched()``, ``rcu_dereference_sched_check()``, -``synchronize_sched()``, ``synchronize_rcu_sched_expedited()``, -``call_rcu_sched()``, ``rcu_barrier_sched()``, and -``rcu_read_lock_sched_held()``. However, anything that disables +includes rcu_read_lock_sched(), rcu_read_unlock_sched(), +rcu_read_lock_sched_notrace(), rcu_read_unlock_sched_notrace(), +rcu_dereference_sched(), rcu_dereference_sched_check(), +synchronize_sched(), synchronize_rcu_sched_expedited(), +call_rcu_sched(), rcu_barrier_sched(), and +rcu_read_lock_sched_held(). However, anything that disables preemption also marks an RCU-sched read-side critical section, including -``preempt_disable()`` and ``preempt_enable()``, ``local_irq_save()`` and -``local_irq_restore()``, and so on. +preempt_disable() and preempt_enable(), local_irq_save() and +local_irq_restore(), and so on. Sleepable RCU ~~~~~~~~~~~~~ @@ -2509,7 +2509,7 @@ this structure must be passed in to each SRCU function, for example, structure. The key benefit of these domains is that a slow SRCU reader in one domain does not delay an SRCU grace period in some other domain. That said, one consequence of these domains is that read-side code must -pass a “cookie” from ``srcu_read_lock()`` to ``srcu_read_unlock()``, for +pass a “cookie” from srcu_read_lock() to srcu_read_unlock(), for example, as follows: :: @@ -2539,24 +2539,24 @@ period to elapse. For example, this results in a self-deadlock: 6 srcu_read_unlock(&ss, idx); However, if line 5 acquired a mutex that was held across a -``synchronize_srcu()`` for domain ``ss``, deadlock would still be +synchronize_srcu() for domain ``ss``, deadlock would still be possible. 
Furthermore, if line 5 acquired a mutex that was held across a -``synchronize_srcu()`` for some other domain ``ss1``, and if an +synchronize_srcu() for some other domain ``ss1``, and if an ``ss1``-domain SRCU read-side critical section acquired another mutex -that was held across as ``ss``-domain ``synchronize_srcu()``, deadlock +that was held across as ``ss``-domain synchronize_srcu(), deadlock would again be possible. Such a deadlock cycle could extend across an arbitrarily large number of different SRCU domains. Again, with great power comes great responsibility. Unlike the other RCU flavors, SRCU read-side critical sections can run on idle and even offline CPUs. This ability requires that -``srcu_read_lock()`` and ``srcu_read_unlock()`` contain memory barriers, +srcu_read_lock() and srcu_read_unlock() contain memory barriers, which means that SRCU readers will run a bit slower than would RCU -readers. It also motivates the ``smp_mb__after_srcu_read_unlock()`` API, -which, in combination with ``srcu_read_unlock()``, guarantees a full +readers. It also motivates the smp_mb__after_srcu_read_unlock() API, +which, in combination with srcu_read_unlock(), guarantees a full memory barrier. -Also unlike other RCU flavors, ``synchronize_srcu()`` may **not** be +Also unlike other RCU flavors, synchronize_srcu() may **not** be invoked from CPU-hotplug notifiers, due to the fact that SRCU grace periods make use of timers and the possibility of timers being temporarily “stranded” on the outgoing CPU. This stranding of timers @@ -2565,7 +2565,7 @@ the CPU-hotplug process. The problem is that if a notifier is waiting on an SRCU grace period, that grace period is waiting on a timer, and that timer is stranded on the outgoing CPU, then the notifier will never be awakened, in other words, deadlock has occurred. This same situation of -course also prohibits ``srcu_barrier()`` from being invoked from +course also prohibits srcu_barrier() from being invoked from CPU-hotplug notifiers. SRCU also differs from other RCU flavors in that SRCU's expedited and @@ -2576,12 +2576,12 @@ have not yet completed. (But please note that this is a property of the current implementation, not necessarily of future implementations.) In addition, if SRCU has been idle for longer than the interval specified by the ``srcutree.exp_holdoff`` kernel boot parameter (25 microseconds -by default), and if a ``synchronize_srcu()`` invocation ends this idle +by default), and if a synchronize_srcu() invocation ends this idle period, that invocation will be automatically expedited. As of v4.12, SRCU's callbacks are maintained per-CPU, eliminating a locking bottleneck present in prior kernel versions. Although this will -allow users to put much heavier stress on ``call_srcu()``, it is +allow users to put much heavier stress on call_srcu(), it is important to note that SRCU does not yet take any special steps to deal with callback flooding. So if you are posting (say) 10,000 SRCU callbacks per second per CPU, you are probably totally OK, but if you @@ -2592,12 +2592,12 @@ of your CPUs and the size of your memory. The `SRCU API `__ -includes ``srcu_read_lock()``, ``srcu_read_unlock()``, -``srcu_dereference()``, ``srcu_dereference_check()``, -``synchronize_srcu()``, ``synchronize_srcu_expedited()``, -``call_srcu()``, ``srcu_barrier()``, and ``srcu_read_lock_held()``. 
It -also includes ``DEFINE_SRCU()``, ``DEFINE_STATIC_SRCU()``, and -``init_srcu_struct()`` APIs for defining and initializing +includes srcu_read_lock(), srcu_read_unlock(), +srcu_dereference(), srcu_dereference_check(), +synchronize_srcu(), synchronize_srcu_expedited(), +call_srcu(), srcu_barrier(), and srcu_read_lock_held(). It +also includes DEFINE_SRCU(), DEFINE_STATIC_SRCU(), and +init_srcu_struct() APIs for defining and initializing ``srcu_struct`` structures. Tasks RCU @@ -2608,11 +2608,11 @@ required to install different types of probes. It would be good to be able to free old trampolines, which sounds like a job for some form of RCU. However, because it is necessary to be able to install a trace anywhere in the code, it is not possible to use read-side markers such -as ``rcu_read_lock()`` and ``rcu_read_unlock()``. In addition, it does +as rcu_read_lock() and rcu_read_unlock(). In addition, it does not work to have these markers in the trampoline itself, because there -would need to be instructions following ``rcu_read_unlock()``. Although -``synchronize_rcu()`` would guarantee that execution reached the -``rcu_read_unlock()``, it would not be able to guarantee that execution +would need to be instructions following rcu_read_unlock(). Although +synchronize_rcu() would guarantee that execution reached the +rcu_read_unlock(), it would not be able to guarantee that execution had completely left the trampoline. Worse yet, in some situations the trampoline's protection must extend a few instructions *prior* to execution reaching the trampoline. For example, these few instructions @@ -2623,15 +2623,15 @@ actually reached the trampoline itself. The solution, in the form of `Tasks RCU `__, is to have implicit read-side critical sections that are delimited by voluntary context switches, that -is, calls to ``schedule()``, ``cond_resched()``, and -``synchronize_rcu_tasks()``. In addition, transitions to and from +is, calls to schedule(), cond_resched(), and +synchronize_rcu_tasks(). In addition, transitions to and from userspace execution also delimit tasks-RCU read-side critical sections. The tasks-RCU API is quite compact, consisting only of -``call_rcu_tasks()``, ``synchronize_rcu_tasks()``, and -``rcu_barrier_tasks()``. In ``CONFIG_PREEMPT=n`` kernels, trampolines -cannot be preempted, so these APIs map to ``call_rcu()``, -``synchronize_rcu()``, and ``rcu_barrier()``, respectively. In +call_rcu_tasks(), synchronize_rcu_tasks(), and +rcu_barrier_tasks(). In ``CONFIG_PREEMPT=n`` kernels, trampolines +cannot be preempted, so these APIs map to call_rcu(), +synchronize_rcu(), and rcu_barrier(), respectively. In ``CONFIG_PREEMPT=y`` kernels, trampolines can be preempted, and these three APIs are therefore implemented by separate functions that check for voluntary context switches. @@ -2646,8 +2646,8 @@ grace-period state machine so as to avoid the need for the additional latency. RCU disables CPU hotplug in a few places, perhaps most notably in the -``rcu_barrier()`` operations. If there is a strong reason to use -``rcu_barrier()`` in CPU-hotplug notifiers, it will be necessary to +rcu_barrier() operations. If there is a strong reason to use +rcu_barrier() in CPU-hotplug notifiers, it will be necessary to avoid disabling CPU hotplug. This would introduce some complexity, so there had better be a *very* good reason. @@ -2664,7 +2664,7 @@ However, this combining tree does not spread its memory across NUMA nodes nor does it align the CPU groups with hardware features such as sockets or cores. 
Such spreading and alignment is currently believed to be unnecessary because the hotpath read-side primitives do not access -the combining tree, nor does ``call_rcu()`` in the common case. If you +the combining tree, nor does call_rcu() in the common case. If you believe that your architecture needs such spreading and alignment, then your architecture should also benefit from the ``rcutree.rcu_fanout_leaf`` boot parameter, which can be set to the @@ -2685,7 +2685,7 @@ likely that adjustments will be required to more gracefully handle extreme loads. It might also be necessary to be able to relate CPU utilization by RCU's kthreads and softirq handlers to the code that instigated this CPU utilization. For example, RCU callback overhead -might be charged back to the originating ``call_rcu()`` instance, though +might be charged back to the originating call_rcu() instance, though probably not in production kernels. Additional work may be required to provide reasonable forward-progress From 2c8bce609f095a8879d3948e0c18d629881518dd Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 10 Nov 2020 16:17:42 -0800 Subject: [PATCH 003/130] doc: Remove obsolete RCU-bh and RCU-sched update-side API members synchronize_rcu_bh(), synchronize_rcu_bh_expedited(), call_rcu_bh(), rcu_barrier_bh(), synchronize_sched(), synchronize_rcu_sched_expedited(), call_rcu_sched(), and rcu_barrier_sched() no longer exist, so this commit removes mention of them. Reported-by: Joel Fernandes Signed-off-by: Paul E. McKenney --- .../RCU/Design/Requirements/Requirements.rst | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/Documentation/RCU/Design/Requirements/Requirements.rst b/Documentation/RCU/Design/Requirements/Requirements.rst index 9b23be637e17..1e3df779c9c1 100644 --- a/Documentation/RCU/Design/Requirements/Requirements.rst +++ b/Documentation/RCU/Design/Requirements/Requirements.rst @@ -2438,13 +2438,13 @@ glance as if rcu_read_unlock_bh() was executing very slowly. The `RCU-bh API `__ -includes rcu_read_lock_bh(), rcu_read_unlock_bh(), -rcu_dereference_bh(), rcu_dereference_bh_check(), -synchronize_rcu_bh(), synchronize_rcu_bh_expedited(), -call_rcu_bh(), rcu_barrier_bh(), and -rcu_read_lock_bh_held(). However, the update-side APIs are now -simple wrappers for other RCU flavors, namely RCU-sched in -CONFIG_PREEMPT=n kernels and RCU-preempt otherwise. +includes rcu_read_lock_bh(), rcu_read_unlock_bh(), rcu_dereference_bh(), +rcu_dereference_bh_check(), and rcu_read_lock_bh_held(). However, the +old RCU-bh update-side APIs are now gone, replaced by synchronize_rcu(), +synchronize_rcu_expedited(), call_rcu(), and rcu_barrier(). In addition, +anything that disables bottom halves also marks an RCU-bh read-side +critical section, including local_bh_disable() and local_bh_enable(), +local_irq_save() and local_irq_restore(), and so on. Sched Flavor (Historical) ~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -2481,13 +2481,13 @@ The `RCU-sched API `__ includes rcu_read_lock_sched(), rcu_read_unlock_sched(), rcu_read_lock_sched_notrace(), rcu_read_unlock_sched_notrace(), -rcu_dereference_sched(), rcu_dereference_sched_check(), -synchronize_sched(), synchronize_rcu_sched_expedited(), -call_rcu_sched(), rcu_barrier_sched(), and -rcu_read_lock_sched_held(). However, anything that disables -preemption also marks an RCU-sched read-side critical section, including -preempt_disable() and preempt_enable(), local_irq_save() and -local_irq_restore(), and so on. 
+rcu_dereference_sched(), rcu_dereference_sched_check(), and +rcu_read_lock_sched_held(). However, the old RCU-sched update-side APIs +are now gone, replaced by synchronize_rcu(), synchronize_rcu_expedited(), +call_rcu(), and rcu_barrier(). In addition, anything that disables +preemption also marks an RCU-sched read-side critical section, +including preempt_disable() and preempt_enable(), local_irq_save() +and local_irq_restore(), and so on. Sleepable RCU ~~~~~~~~~~~~~ From 4704bd317108c94b6e2d8309f3dbb70d2015568a Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Mon, 16 Nov 2020 11:18:16 +0100 Subject: [PATCH 004/130] list: Fix a typo at the kernel-doc markup hlist_add_behing -> hlist_add_behind Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Paul E. McKenney --- include/linux/list.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/list.h b/include/linux/list.h index 89bdc92e75c3..f2af4b4aa4e9 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -901,7 +901,7 @@ static inline void hlist_add_before(struct hlist_node *n, } /** - * hlist_add_behing - add a new entry after the one specified + * hlist_add_behind - add a new entry after the one specified * @n: new entry to be added * @prev: hlist node to add it after, which must be non-NULL */ From 9d3a04853fe640e0eba2c0799c880b7dcf190219 Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Sat, 28 Nov 2020 15:32:59 -0500 Subject: [PATCH 005/130] docs: Fix typos and drop/fix dead links in RCU documentation It appears the Compaq link moved to a machine at HP for a while after the merger of the two, but that doesn't work either. A search of HP for "wiz_2637" (w and w/o html suffix) comes up empty. Since the references aren't critical to the documents we remove them. Also, the lkml.kernel.org/g links have been broken for ages, so replace them with lore.kernel.org/r links - standardize on lore for all links too. Note that we put off fixing these 4y ago - presumably thinking that a treewide fixup was pending. Probably safe to go fix the RCU ones now. https://lore.kernel.org/r/20160915144926.GD10850@linux.vnet.ibm.com/ Cc: Michael Opdenacker Cc: Steven Rostedt Cc: "Paul E. McKenney" Signed-off-by: Paul Gortmaker Signed-off-by: Paul E. McKenney --- .../RCU/Design/Requirements/Requirements.rst | 23 +++++++++---------- Documentation/RCU/checklist.rst | 8 +++---- 2 files changed, 14 insertions(+), 17 deletions(-) diff --git a/Documentation/RCU/Design/Requirements/Requirements.rst b/Documentation/RCU/Design/Requirements/Requirements.rst index 1e3df779c9c1..f32f8faddc7d 100644 --- a/Documentation/RCU/Design/Requirements/Requirements.rst +++ b/Documentation/RCU/Design/Requirements/Requirements.rst @@ -321,11 +321,10 @@ do_something_gp_buggy() below: 12 } However, this temptation must be resisted because there are a -surprisingly large number of ways that the compiler (to say nothing of -`DEC Alpha CPUs `__) -can trip this code up. For but one example, if the compiler were short -of registers, it might choose to refetch from ``gp`` rather than keeping -a separate copy in ``p`` as follows: +surprisingly large number of ways that the compiler (or weak ordering +CPUs like the DEC Alpha) can trip this code up. For but one example, if +the compiler were short of registers, it might choose to refetch from +``gp`` rather than keeping a separate copy in ``p`` as follows: :: @@ -1183,7 +1182,7 @@ costs have plummeted. 
However, as I learned from Matt Mackall's `bloatwatch `__ efforts, memory footprint is critically important on single-CPU systems with non-preemptible (``CONFIG_PREEMPT=n``) kernels, and thus `tiny -RCU `__ +RCU `__ was born. Josh Triplett has since taken over the small-memory banner with his `Linux kernel tinification `__ project, which resulted in `SRCU <#Sleepable%20RCU>`__ becoming optional @@ -1624,7 +1623,7 @@ against mishaps and misuse: init_rcu_head() and cleaned up with destroy_rcu_head(). Mathieu Desnoyers made me aware of this requirement, and also supplied the needed - `patch `__. + `patch `__. #. An infinite loop in an RCU read-side critical section will eventually trigger an RCU CPU stall warning splat, with the duration of “eventually” being controlled by the ``RCU_CPU_STALL_TIMEOUT`` @@ -1716,7 +1715,7 @@ requires almost all of them be hidden behind a ``CONFIG_RCU_EXPERT`` This all should be quite obvious, but the fact remains that Linus Torvalds recently had to -`remind `__ +`remind `__ me of this requirement. Firmware Interface @@ -1837,9 +1836,9 @@ NMI handlers. The name notwithstanding, some Linux-kernel architectures can have nested NMIs, which RCU must handle correctly. Andy Lutomirski `surprised -me `__ +me `__ with this requirement; he also kindly surprised me with `an -algorithm `__ +algorithm `__ that meets this requirement. Furthermore, NMI handlers can be interrupted by what appear to RCU to be @@ -2264,7 +2263,7 @@ more extreme measures. Returning to the ``page`` structure, the ``rcu_head`` field shares storage with a great many other structures that are used at various points in the corresponding page's lifetime. In order to correctly resolve certain `race -conditions `__, +conditions `__, the Linux kernel's memory-management subsystem needs a particular bit to remain zero during all phases of grace-period processing, and that bit happens to map to the bottom bit of the ``rcu_head`` structure's @@ -2328,7 +2327,7 @@ preempted. This requirement made its presence known after users made it clear that an earlier `real-time patch `__ did not meet their needs, in conjunction with some `RCU -issues `__ +issues `__ encountered by a very early version of the -rt patchset. In addition, RCU must make do with a sub-100-microsecond real-time diff --git a/Documentation/RCU/checklist.rst b/Documentation/RCU/checklist.rst index bb7128eb322e..2d1dc1deffc9 100644 --- a/Documentation/RCU/checklist.rst +++ b/Documentation/RCU/checklist.rst @@ -70,7 +70,7 @@ over a rather long period of time, but improvements are always welcome! is less readable and prevents lockdep from detecting locking issues. Letting RCU-protected pointers "leak" out of an RCU read-side - critical section is every bid as bad as letting them leak out + critical section is every bit as bad as letting them leak out from under a lock. Unless, of course, you have arranged some other means of protection, such as a lock or a reference count -before- letting them out of the RCU read-side critical section. @@ -129,9 +129,7 @@ over a rather long period of time, but improvements are always welcome! accesses. The rcu_dereference() primitive ensures that the CPU picks up the pointer before it picks up the data that the pointer points to. This really is necessary - on Alpha CPUs. If you don't believe me, see: - - http://www.openvms.compaq.com/wizard/wiz_2637.html + on Alpha CPUs. 
The rcu_dereference() primitive is also an excellent documentation aid, letting the person reading the @@ -216,7 +214,7 @@ over a rather long period of time, but improvements are always welcome! 7. As of v4.20, a given kernel implements only one RCU flavor, which is RCU-sched for PREEMPT=n and RCU-preempt for PREEMPT=y. If the updater uses call_rcu() or synchronize_rcu(), - then the corresponding readers my use rcu_read_lock() and + then the corresponding readers may use rcu_read_lock() and rcu_read_unlock(), rcu_read_lock_bh() and rcu_read_unlock_bh(), or any pair of primitives that disables and re-enables preemption, for example, rcu_read_lock_sched() and rcu_read_unlock_sched(). From d756c74e6f6e76e99f8bffcea57833816dd335b6 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 9 Dec 2020 16:54:41 -0800 Subject: [PATCH 006/130] doc: Update RCU requirements RCU_INIT_POINTER() description Back in the day, RCU_INIT_POINTER() was the only way to avoid memory-barrier instructions while storing NULL to an RCU-protected pointer. Fortunately, in 2016, rcu_assign_pointer() started checking for compile-time NULL pointers and omitting the memory-barrier instructions in that case. Unfortunately, RCU's Requirements.rst document was not updated accordingly. This commit therefore at long last carries out that update. Fixes: 3a37f7275cda ("rcu: No ordering for rcu_assign_pointer() of NULL") Link: https://lore.kernel.org/lkml/20201209230755.GV7338@casper.infradead.org/ Reported-by: Matthew Wilcox Acked-by: Linus Torvalds Signed-off-by: Paul E. McKenney --- Documentation/RCU/Design/Requirements/Requirements.rst | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Documentation/RCU/Design/Requirements/Requirements.rst b/Documentation/RCU/Design/Requirements/Requirements.rst index f32f8faddc7d..65c7839114a5 100644 --- a/Documentation/RCU/Design/Requirements/Requirements.rst +++ b/Documentation/RCU/Design/Requirements/Requirements.rst @@ -1668,8 +1668,7 @@ against mishaps and misuse: this purpose. #. It is not necessary to use rcu_assign_pointer() when creating linked structures that are to be published via a single external - pointer. The RCU_INIT_POINTER() macro is provided for this task - and also for assigning ``NULL`` pointers at runtime. + pointer. The RCU_INIT_POINTER() macro is provided for this task. This not a hard-and-fast list: RCU's diagnostic capabilities will continue to be guided by the number and type of usage bugs found in From 2252ec1464730ce718dc8087c13a419b9aa58758 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 10 Dec 2020 09:53:50 -0800 Subject: [PATCH 007/130] doc: Remove obsolete rcutree.rcu_idle_lazy_gp_delay boot parameter This commit removes documentation for the rcutree.rcu_idle_lazy_gp_delay kernel boot parameter given that this parameter no longer exists. Fixes: 77a40f97030b ("rcu: Remove kfree_rcu() special casing and lazy-callback handling") Signed-off-by: Paul E. McKenney --- Documentation/admin-guide/kernel-parameters.txt | 6 ------ 1 file changed, 6 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index c722ec19cd00..b5baa8a54df0 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -4179,12 +4179,6 @@ Set wakeup interval for idle CPUs that have RCU callbacks (RCU_FAST_NO_HZ=y). 
- rcutree.rcu_idle_lazy_gp_delay= [KNL] - Set wakeup interval for idle CPUs that have - only "lazy" RCU callbacks (RCU_FAST_NO_HZ=y). - Lazy RCU callbacks are those which RCU can - prove do nothing more than free memory. - rcutree.rcu_kick_kthreads= [KNL] Cause the grace-period kthread to get an extra wake_up() if it sleeps three times longer than From 84109ab58590dc6c4e7eb36329fdc7ec121ed5a5 Mon Sep 17 00:00:00 2001 From: Zqiang Date: Fri, 20 Nov 2020 06:53:11 -0800 Subject: [PATCH 008/130] rcu: Record kvfree_call_rcu() call stack for KASAN This commit adds a call to kasan_record_aux_stack() in kvfree_call_rcu() in order to record the call stack of the code that caused the object to be freed. Please note that this function does not update the allocated/freed state, which is important because RCU readers might still be referencing this object. Acked-by: Dmitry Vyukov Reviewed-by: Uladzislau Rezki (Sony) Signed-off-by: Zqiang Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 40e5e3dd253e..2db736cbe342 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3498,6 +3498,7 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func) goto unlock_return; } + kasan_record_aux_stack(ptr); success = kvfree_call_rcu_add_ptr_to_bulk(krcp, ptr); if (!success) { run_page_cache_worker(krcp); From 5130b8fd06901c1b3a4bd0d0f5c5ea99b2b0a6f0 Mon Sep 17 00:00:00 2001 From: "Uladzislau Rezki (Sony)" Date: Fri, 20 Nov 2020 12:49:16 +0100 Subject: [PATCH 009/130] rcu: Introduce kfree_rcu() single-argument macro There is a kvfree_rcu() single argument macro that handles pointers returned by kvmalloc(). Even though it also handles pointer returned by kmalloc(), readability suffers. This commit therefore updates the kfree_rcu() macro to explicitly pair with kmalloc(), thus improving readability. Signed-off-by: Uladzislau Rezki (Sony) Signed-off-by: Paul E. McKenney --- include/linux/rcupdate.h | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index de0826411311..b95373ea3b02 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -851,8 +851,9 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) /** * kfree_rcu() - kfree an object after a grace period. - * @ptr: pointer to kfree - * @rhf: the name of the struct rcu_head within the type of @ptr. + * @ptr: pointer to kfree for both single- and double-argument invocations. + * @rhf: the name of the struct rcu_head within the type of @ptr, + * but only for double-argument invocations. * * Many rcu callbacks functions just call kfree() on the base structure. * These functions are trivial, but their size adds up, and furthermore @@ -875,13 +876,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) * The BUILD_BUG_ON check must not involve any function calls, hence the * checks are done in macros here. */ -#define kfree_rcu(ptr, rhf) \ -do { \ - typeof (ptr) ___p = (ptr); \ - \ - if (___p) \ - __kvfree_rcu(&((___p)->rhf), offsetof(typeof(*(ptr)), rhf)); \ -} while (0) +#define kfree_rcu kvfree_rcu /** * kvfree_rcu() - kvfree an object after a grace period. @@ -913,7 +908,14 @@ do { \ kvfree_rcu_arg_2, kvfree_rcu_arg_1)(__VA_ARGS__) #define KVFREE_GET_MACRO(_1, _2, NAME, ...) 
NAME -#define kvfree_rcu_arg_2(ptr, rhf) kfree_rcu(ptr, rhf) +#define kvfree_rcu_arg_2(ptr, rhf) \ +do { \ + typeof (ptr) ___p = (ptr); \ + \ + if (___p) \ + __kvfree_rcu(&((___p)->rhf), offsetof(typeof(*(ptr)), rhf)); \ +} while (0) + #define kvfree_rcu_arg_1(ptr) \ do { \ typeof(ptr) ___p = (ptr); \ From 5ea5d1ed572cb5ac173674fe770252253d2d9e27 Mon Sep 17 00:00:00 2001 From: "Uladzislau Rezki (Sony)" Date: Fri, 20 Nov 2020 12:49:17 +0100 Subject: [PATCH 010/130] rcu: Eliminate the __kvfree_rcu() macro This commit open-codes the __kvfree_rcu() macro, thus saving a few lines of code and improving readability. Signed-off-by: Uladzislau Rezki (Sony) Signed-off-by: Paul E. McKenney --- include/linux/rcupdate.h | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index b95373ea3b02..f1576cde5951 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -840,15 +840,6 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) */ #define __is_kvfree_rcu_offset(offset) ((offset) < 4096) -/* - * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain. - */ -#define __kvfree_rcu(head, offset) \ - do { \ - BUILD_BUG_ON(!__is_kvfree_rcu_offset(offset)); \ - kvfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \ - } while (0) - /** * kfree_rcu() - kfree an object after a grace period. * @ptr: pointer to kfree for both single- and double-argument invocations. @@ -866,7 +857,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) * Because the functions are not allowed in the low-order 4096 bytes of * kernel virtual memory, offsets up to 4095 bytes can be accommodated. * If the offset is larger than 4095 bytes, a compile-time error will - * be generated in __kvfree_rcu(). If this error is triggered, you can + * be generated in kvfree_rcu_arg_2(). If this error is triggered, you can * either fall back to use of call_rcu() or rearrange the structure to * position the rcu_head structure into the first 4096 bytes. * @@ -912,8 +903,11 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) do { \ typeof (ptr) ___p = (ptr); \ \ - if (___p) \ - __kvfree_rcu(&((___p)->rhf), offsetof(typeof(*(ptr)), rhf)); \ + if (___p) { \ + BUILD_BUG_ON(!__is_kvfree_rcu_offset(offsetof(typeof(*(ptr)), rhf))); \ + kvfree_call_rcu(&((___p)->rhf), (rcu_callback_t)(unsigned long) \ + (offsetof(typeof(*(ptr)), rhf))); \ + } \ } while (0) #define kvfree_rcu_arg_1(ptr) \ From 2341bc4a0311e4319ced6c2828bb19309dee74fd Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 15 Dec 2020 15:16:45 +0100 Subject: [PATCH 011/130] rcu: Make RCU_BOOST default on CONFIG_PREEMPT_RT On PREEMPT_RT kernels, RCU callbacks are deferred to the `rcuc' kthread. This can stall RCU grace periods due to lengthy preemption not only of RCU readers but also of 'rcuc' kthreads, either of which prevent grace periods from completing, which can in turn result in OOM. Because PREEMPT_RT kernels have more kthreads that can block grace periods, it is more important for such kernels to enable RCU_BOOST. This commit therefore makes RCU_BOOST the default on PREEMPT_RT. RCU_BOOST can still be manually disabled if need be. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Paul E. 
McKenney --- kernel/rcu/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig index cdc57b4f6d48..aa8cc8c977e7 100644 --- a/kernel/rcu/Kconfig +++ b/kernel/rcu/Kconfig @@ -188,8 +188,8 @@ config RCU_FAST_NO_HZ config RCU_BOOST bool "Enable RCU priority boosting" - depends on RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT - default n + depends on (RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT) || PREEMPT_RT + default y if PREEMPT_RT help This option boosts the priority of preempted RCU readers that block the current preemptible RCU grace period for too long. From 8b9a0ecc7ef5e1ed3afbc926de17399a37128c82 Mon Sep 17 00:00:00 2001 From: Scott Wood Date: Tue, 15 Dec 2020 15:16:46 +0100 Subject: [PATCH 012/130] rcu: Unconditionally use rcuc threads on PREEMPT_RT PREEMPT_RT systems have long used the rcutree.use_softirq kernel boot parameter to avoid use of RCU_SOFTIRQ handlers, which can disrupt real-time applications by invoking callbacks during return from interrupts that arrived while executing time-critical code. This kernel boot parameter instead runs RCU core processing in an 'rcuc' kthread, thus allowing the scheduler to do its job of avoiding disrupting time-critical code. This commit therefore disables the rcutree.use_softirq kernel boot parameter on PREEMPT_RT systems, thus forcing such systems to do RCU core processing in 'rcuc' kthreads. This approach has long been in use by users of the -rt patchset, and there have been no complaints. There is therefore no way for the system administrator to override this choice, at least without modifying and rebuilding the kernel. Signed-off-by: Scott Wood [bigeasy: Reword commit message] Signed-off-by: Sebastian Andrzej Siewior [ paulmck: Update kernel-parameters.txt accordingly. ] Signed-off-by: Paul E. McKenney --- Documentation/admin-guide/kernel-parameters.txt | 4 ++++ kernel/rcu/tree.c | 4 +++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index c722ec19cd00..521255f4ef13 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -4092,6 +4092,10 @@ value, meaning that RCU_SOFTIRQ is used by default. Specify rcutree.use_softirq=0 to use rcuc kthreads. + But note that CONFIG_PREEMPT_RT=y kernels disable + this kernel boot parameter, forcibly setting it + to zero. + rcutree.rcu_fanout_exact= [KNL] Disable autobalancing of the rcu_node combining tree. This is used by rcutorture, and might diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 40e5e3dd253e..d60903581300 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -100,8 +100,10 @@ static struct rcu_state rcu_state = { static bool dump_tree; module_param(dump_tree, bool, 0444); /* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */ -static bool use_softirq = true; +static bool use_softirq = !IS_ENABLED(CONFIG_PREEMPT_RT); +#ifndef CONFIG_PREEMPT_RT module_param(use_softirq, bool, 0444); +#endif /* Control rcu_node-tree auto-balancing at boot time. */ static bool rcu_fanout_exact; module_param(rcu_fanout_exact, bool, 0444); From 36221e109eb20ac111bc3bf3e8d5639aa457c7e0 Mon Sep 17 00:00:00 2001 From: Julia Cartwright Date: Tue, 15 Dec 2020 15:16:47 +0100 Subject: [PATCH 013/130] rcu: Enable rcu_normal_after_boot unconditionally for RT Expedited RCU grace periods send IPIs to all non-idle CPUs, and thus can disrupt time-critical code in real-time applications. 
However, there is a portion of boot-time processing (presumably before any real-time applications have started) where expedited RCU grace periods are the only option. And so it is that experience with the -rt patchset indicates that PREEMPT_RT systems should always set the rcupdate.rcu_normal_after_boot kernel boot parameter. This commit therefore makes the post-boot application environment safe for real-time applications by making PREEMPT_RT systems disable the rcupdate.rcu_normal_after_boot kernel boot parameter and acting as if this parameter had been set. This means that post-boot calls to synchronize_rcu_expedited() will be treated as if they were instead calls to synchronize_rcu(), thus preventing the IPIs, and thus avoiding disrupting real-time applications. Suggested-by: Luiz Capitulino Acked-by: Paul E. McKenney Signed-off-by: Julia Cartwright Signed-off-by: Sebastian Andrzej Siewior [ paulmck: Update kernel-parameters.txt accordingly. ] Signed-off-by: Paul E. McKenney --- Documentation/admin-guide/kernel-parameters.txt | 7 +++++++ kernel/rcu/update.c | 4 +++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 521255f4ef13..e0008d9caccb 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -4474,6 +4474,13 @@ only normal grace-period primitives. No effect on CONFIG_TINY_RCU kernels. + But note that CONFIG_PREEMPT_RT=y kernels enables + this kernel boot parameter, forcibly setting + it to the value one, that is, converting any + post-boot attempt at an expedited RCU grace + period to instead use normal non-expedited + grace-period processing. + rcupdate.rcu_task_ipi_delay= [KNL] Set time in jiffies during which RCU tasks will avoid sending IPIs, starting with the beginning diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index 39334d2d2b37..b95ae86c40a7 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -56,8 +56,10 @@ #ifndef CONFIG_TINY_RCU module_param(rcu_expedited, int, 0); module_param(rcu_normal, int, 0); -static int rcu_normal_after_boot; +static int rcu_normal_after_boot = IS_ENABLED(CONFIG_PREEMPT_RT); +#ifndef CONFIG_PREEMPT_RT module_param(rcu_normal_after_boot, int, 0); +#endif #endif /* #ifndef CONFIG_TINY_RCU */ #ifdef CONFIG_DEBUG_LOCK_ALLOC From 74612a07b83fc46c2b2e6f71a541d55b024ebefc Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 12 Nov 2020 16:34:09 -0800 Subject: [PATCH 014/130] srcu: Make Tiny SRCU use multi-bit grace-period counter There is a need for a polling interface for SRCU grace periods. This polling needs to distinguish between an SRCU instance being idle on the one hand or in the middle of a grace period on the other. This commit therefore converts the Tiny SRCU srcu_struct structure's srcu_idx from a defacto boolean to a free-running counter, using the bottom bit to indicate that a grace period is in progress. The second-from-bottom bit is thus used as the index returned by srcu_read_lock(). Link: https://lore.kernel.org/rcu/20201112201547.GF3365678@moria.home.lan/ Reported-by: Kent Overstreet [ paulmck: Fix ->srcu_lock_nesting[] indexing per Neeraj Upadhyay. ] Reviewed-by: Neeraj Upadhyay Signed-off-by: Paul E. 
McKenney --- include/linux/srcutiny.h | 6 +++--- kernel/rcu/srcutiny.c | 5 +++-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h index 5a5a1941ca15..b8b42d0a24f1 100644 --- a/include/linux/srcutiny.h +++ b/include/linux/srcutiny.h @@ -15,7 +15,7 @@ struct srcu_struct { short srcu_lock_nesting[2]; /* srcu_read_lock() nesting depth. */ - short srcu_idx; /* Current reader array element. */ + unsigned short srcu_idx; /* Current reader array element in bit 0x2. */ u8 srcu_gp_running; /* GP workqueue running? */ u8 srcu_gp_waiting; /* GP waiting for readers? */ struct swait_queue_head srcu_wq; @@ -59,7 +59,7 @@ static inline int __srcu_read_lock(struct srcu_struct *ssp) { int idx; - idx = READ_ONCE(ssp->srcu_idx); + idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1; WRITE_ONCE(ssp->srcu_lock_nesting[idx], ssp->srcu_lock_nesting[idx] + 1); return idx; } @@ -80,7 +80,7 @@ static inline void srcu_torture_stats_print(struct srcu_struct *ssp, { int idx; - idx = READ_ONCE(ssp->srcu_idx) & 0x1; + idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1; pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n", tt, tf, idx, READ_ONCE(ssp->srcu_lock_nesting[!idx]), diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c index 6208c1dae5c9..5598cf6f1edc 100644 --- a/kernel/rcu/srcutiny.c +++ b/kernel/rcu/srcutiny.c @@ -124,11 +124,12 @@ void srcu_drive_gp(struct work_struct *wp) ssp->srcu_cb_head = NULL; ssp->srcu_cb_tail = &ssp->srcu_cb_head; local_irq_enable(); - idx = ssp->srcu_idx; - WRITE_ONCE(ssp->srcu_idx, !ssp->srcu_idx); + idx = (ssp->srcu_idx & 0x2) / 2; + WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1); WRITE_ONCE(ssp->srcu_gp_waiting, true); /* srcu_read_unlock() wakes! */ swait_event_exclusive(ssp->srcu_wq, !READ_ONCE(ssp->srcu_lock_nesting[idx])); WRITE_ONCE(ssp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */ + WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1); /* Invoke the callbacks we removed above. */ while (lh) { From 1a893c711a600ab57526619b56e6f6b7be00956e Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 13 Nov 2020 09:37:39 -0800 Subject: [PATCH 015/130] srcu: Provide internal interface to start a Tiny SRCU grace period There is a need for a polling interface for SRCU grace periods. This polling needs to initiate an SRCU grace period without having to queue (and manage) a callback. This commit therefore splits the Tiny SRCU call_srcu() function into callback-queuing and start-grace-period portions, with the latter in a new function named srcu_gp_start_if_needed(). Link: https://lore.kernel.org/rcu/20201112201547.GF3365678@moria.home.lan/ Reported-by: Kent Overstreet Reviewed-by: Neeraj Upadhyay Signed-off-by: Paul E. McKenney --- kernel/rcu/srcutiny.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c index 5598cf6f1edc..3bac1db85a85 100644 --- a/kernel/rcu/srcutiny.c +++ b/kernel/rcu/srcutiny.c @@ -152,6 +152,16 @@ void srcu_drive_gp(struct work_struct *wp) } EXPORT_SYMBOL_GPL(srcu_drive_gp); +static void srcu_gp_start_if_needed(struct srcu_struct *ssp) +{ + if (!READ_ONCE(ssp->srcu_gp_running)) { + if (likely(srcu_init_done)) + schedule_work(&ssp->srcu_work); + else if (list_empty(&ssp->srcu_work.entry)) + list_add(&ssp->srcu_work.entry, &srcu_boot_list); + } +} + /* * Enqueue an SRCU callback on the specified srcu_struct structure, * initiating grace-period processing if it is not already running. 
@@ -167,12 +177,7 @@ void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, *ssp->srcu_cb_tail = rhp; ssp->srcu_cb_tail = &rhp->next; local_irq_restore(flags); - if (!READ_ONCE(ssp->srcu_gp_running)) { - if (likely(srcu_init_done)) - schedule_work(&ssp->srcu_work); - else if (list_empty(&ssp->srcu_work.entry)) - list_add(&ssp->srcu_work.entry, &srcu_boot_list); - } + srcu_gp_start_if_needed(ssp); } EXPORT_SYMBOL_GPL(call_srcu); From 29d2bb94a8a126ce80ffbb433b648b32fdea524e Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 13 Nov 2020 10:08:09 -0800 Subject: [PATCH 016/130] srcu: Provide internal interface to start a Tree SRCU grace period There is a need for a polling interface for SRCU grace periods. This polling needs to initiate an SRCU grace period without having to queue (and manage) a callback. This commit therefore splits the Tree SRCU __call_srcu() function into callback-initialization and queuing/start-grace-period portions, with the latter in a new function named srcu_gp_start_if_needed(). This function may be passed a NULL callback pointer, in which case it will refrain from queuing anything. Why have the new function mess with queuing? Locking considerations, of course! Link: https://lore.kernel.org/rcu/20201112201547.GF3365678@moria.home.lan/ Reported-by: Kent Overstreet Reviewed-by: Neeraj Upadhyay Signed-off-by: Paul E. McKenney --- kernel/rcu/srcutree.c | 66 ++++++++++++++++++++++++------------------- 1 file changed, 37 insertions(+), 29 deletions(-) diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index 0f23d20d485a..9a7b6505fdc8 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -807,6 +807,42 @@ static void srcu_leak_callback(struct rcu_head *rhp) { } +/* + * Start an SRCU grace period, and also queue the callback if non-NULL. + */ +static void srcu_gp_start_if_needed(struct srcu_struct *ssp, struct rcu_head *rhp, bool do_norm) +{ + unsigned long flags; + int idx; + bool needexp = false; + bool needgp = false; + unsigned long s; + struct srcu_data *sdp; + + idx = srcu_read_lock(ssp); + sdp = raw_cpu_ptr(ssp->sda); + spin_lock_irqsave_rcu_node(sdp, flags); + rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp); + rcu_segcblist_advance(&sdp->srcu_cblist, + rcu_seq_current(&ssp->srcu_gp_seq)); + s = rcu_seq_snap(&ssp->srcu_gp_seq); + (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s); + if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) { + sdp->srcu_gp_seq_needed = s; + needgp = true; + } + if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) { + sdp->srcu_gp_seq_needed_exp = s; + needexp = true; + } + spin_unlock_irqrestore_rcu_node(sdp, flags); + if (needgp) + srcu_funnel_gp_start(ssp, sdp, s, do_norm); + else if (needexp) + srcu_funnel_exp_start(ssp, sdp->mynode, s); + srcu_read_unlock(ssp, idx); +} + /* * Enqueue an SRCU callback on the srcu_data structure associated with * the current CPU and the specified srcu_struct structure, initiating @@ -838,13 +874,6 @@ static void srcu_leak_callback(struct rcu_head *rhp) static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, rcu_callback_t func, bool do_norm) { - unsigned long flags; - int idx; - bool needexp = false; - bool needgp = false; - unsigned long s; - struct srcu_data *sdp; - check_init_srcu_struct(ssp); if (debug_rcu_head_queue(rhp)) { /* Probable double call_srcu(), so leak the callback. 
*/ @@ -853,28 +882,7 @@ static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, return; } rhp->func = func; - idx = srcu_read_lock(ssp); - sdp = raw_cpu_ptr(ssp->sda); - spin_lock_irqsave_rcu_node(sdp, flags); - rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp); - rcu_segcblist_advance(&sdp->srcu_cblist, - rcu_seq_current(&ssp->srcu_gp_seq)); - s = rcu_seq_snap(&ssp->srcu_gp_seq); - (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s); - if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) { - sdp->srcu_gp_seq_needed = s; - needgp = true; - } - if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) { - sdp->srcu_gp_seq_needed_exp = s; - needexp = true; - } - spin_unlock_irqrestore_rcu_node(sdp, flags); - if (needgp) - srcu_funnel_gp_start(ssp, sdp, s, do_norm); - else if (needexp) - srcu_funnel_exp_start(ssp, sdp->mynode, s); - srcu_read_unlock(ssp, idx); + srcu_gp_start_if_needed(ssp, rhp, do_norm); } /** From 8b5bd67cf6422b63ee100d76d8de8960ca2df7f0 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 13 Nov 2020 12:54:48 -0800 Subject: [PATCH 017/130] srcu: Provide polling interfaces for Tiny SRCU grace periods There is a need for a polling interface for SRCU grace periods, so this commit supplies get_state_synchronize_srcu(), start_poll_synchronize_srcu(), and poll_state_synchronize_srcu() for this purpose. The first can be used if future grace periods are inevitable (perhaps due to a later call_srcu() invocation), the second if future grace periods might not otherwise happen, and the third to check if a grace period has elapsed since the corresponding call to either of the first two. As with get_state_synchronize_rcu() and cond_synchronize_rcu(), the return value from either get_state_synchronize_srcu() or start_poll_synchronize_srcu() must be passed in to a later call to poll_state_synchronize_srcu(). Link: https://lore.kernel.org/rcu/20201112201547.GF3365678@moria.home.lan/ Reported-by: Kent Overstreet [ paulmck: Add EXPORT_SYMBOL_GPL() per kernel test robot feedback. ] [ paulmck: Apply feedback from Neeraj Upadhyay. ] Link: https://lore.kernel.org/lkml/20201117004017.GA7444@paulmck-ThinkPad-P72/ Reviewed-by: Neeraj Upadhyay Signed-off-by: Paul E. 
McKenney --- include/linux/rcupdate.h | 2 ++ include/linux/srcu.h | 3 +++ include/linux/srcutiny.h | 1 + kernel/rcu/srcutiny.c | 55 ++++++++++++++++++++++++++++++++++++++-- 4 files changed, 59 insertions(+), 2 deletions(-) diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index de0826411311..e09c0d87b3c3 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -33,6 +33,8 @@ #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) #define ulong2long(a) (*(long *)(&(a))) +#define USHORT_CMP_GE(a, b) (USHRT_MAX / 2 >= (unsigned short)((a) - (b))) +#define USHORT_CMP_LT(a, b) (USHRT_MAX / 2 < (unsigned short)((a) - (b))) /* Exported common interfaces */ void call_rcu(struct rcu_head *head, rcu_callback_t func); diff --git a/include/linux/srcu.h b/include/linux/srcu.h index e432cc92c73d..a0895bbf71ce 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -60,6 +60,9 @@ void cleanup_srcu_struct(struct srcu_struct *ssp); int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp); void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp); void synchronize_srcu(struct srcu_struct *ssp); +unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp); +unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp); +bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie); #ifdef CONFIG_DEBUG_LOCK_ALLOC diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h index b8b42d0a24f1..0e0cf4d6a72a 100644 --- a/include/linux/srcutiny.h +++ b/include/linux/srcutiny.h @@ -16,6 +16,7 @@ struct srcu_struct { short srcu_lock_nesting[2]; /* srcu_read_lock() nesting depth. */ unsigned short srcu_idx; /* Current reader array element in bit 0x2. */ + unsigned short srcu_idx_max; /* Furthest future srcu_idx request. */ u8 srcu_gp_running; /* GP workqueue running? */ u8 srcu_gp_waiting; /* GP waiting for readers? */ struct swait_queue_head srcu_wq; diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c index 3bac1db85a85..26344dc6483b 100644 --- a/kernel/rcu/srcutiny.c +++ b/kernel/rcu/srcutiny.c @@ -34,6 +34,7 @@ static int init_srcu_struct_fields(struct srcu_struct *ssp) ssp->srcu_gp_running = false; ssp->srcu_gp_waiting = false; ssp->srcu_idx = 0; + ssp->srcu_idx_max = 0; INIT_WORK(&ssp->srcu_work, srcu_drive_gp); INIT_LIST_HEAD(&ssp->srcu_work.entry); return 0; @@ -84,6 +85,8 @@ void cleanup_srcu_struct(struct srcu_struct *ssp) WARN_ON(ssp->srcu_gp_waiting); WARN_ON(ssp->srcu_cb_head); WARN_ON(&ssp->srcu_cb_head != ssp->srcu_cb_tail); + WARN_ON(ssp->srcu_idx != ssp->srcu_idx_max); + WARN_ON(ssp->srcu_idx & 0x1); } EXPORT_SYMBOL_GPL(cleanup_srcu_struct); @@ -114,7 +117,7 @@ void srcu_drive_gp(struct work_struct *wp) struct srcu_struct *ssp; ssp = container_of(wp, struct srcu_struct, srcu_work); - if (ssp->srcu_gp_running || !READ_ONCE(ssp->srcu_cb_head)) + if (ssp->srcu_gp_running || USHORT_CMP_GE(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max))) return; /* Already running or nothing to do. */ /* Remove recently arrived callbacks and wait for readers. */ @@ -147,13 +150,19 @@ void srcu_drive_gp(struct work_struct *wp) * straighten that out. 
*/ WRITE_ONCE(ssp->srcu_gp_running, false); - if (READ_ONCE(ssp->srcu_cb_head)) + if (USHORT_CMP_LT(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max))) schedule_work(&ssp->srcu_work); } EXPORT_SYMBOL_GPL(srcu_drive_gp); static void srcu_gp_start_if_needed(struct srcu_struct *ssp) { + unsigned short cookie; + + cookie = get_state_synchronize_srcu(ssp); + if (USHORT_CMP_GE(READ_ONCE(ssp->srcu_idx_max), cookie)) + return; + WRITE_ONCE(ssp->srcu_idx_max, cookie); if (!READ_ONCE(ssp->srcu_gp_running)) { if (likely(srcu_init_done)) schedule_work(&ssp->srcu_work); @@ -196,6 +205,48 @@ void synchronize_srcu(struct srcu_struct *ssp) } EXPORT_SYMBOL_GPL(synchronize_srcu); +/* + * get_state_synchronize_srcu - Provide an end-of-grace-period cookie + */ +unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp) +{ + unsigned long ret; + + barrier(); + ret = (READ_ONCE(ssp->srcu_idx) + 3) & ~0x1; + barrier(); + return ret & USHRT_MAX; +} +EXPORT_SYMBOL_GPL(get_state_synchronize_srcu); + +/* + * start_poll_synchronize_srcu - Provide cookie and start grace period + * + * The difference between this and get_state_synchronize_srcu() is that + * this function ensures that the poll_state_synchronize_srcu() will + * eventually return the value true. + */ +unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp) +{ + unsigned long ret = get_state_synchronize_srcu(ssp); + + srcu_gp_start_if_needed(ssp); + return ret; +} +EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu); + +/* + * poll_state_synchronize_srcu - Has cookie's grace period ended? + */ +bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie) +{ + bool ret = USHORT_CMP_GE(READ_ONCE(ssp->srcu_idx), cookie); + + barrier(); + return ret; +} +EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu); + /* Lockdep diagnostics. */ void __init rcu_scheduler_starting(void) { From 5358c9fa54b09b5d3d7811b033aa0838c1bbaaf2 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 13 Nov 2020 17:31:55 -0800 Subject: [PATCH 018/130] srcu: Provide polling interfaces for Tree SRCU grace periods There is a need for a polling interface for SRCU grace periods, so this commit supplies get_state_synchronize_srcu(), start_poll_synchronize_srcu(), and poll_state_synchronize_srcu() for this purpose. The first can be used if future grace periods are inevitable (perhaps due to a later call_srcu() invocation), the second if future grace periods might not otherwise happen, and the third to check if a grace period has elapsed since the corresponding call to either of the first two. As with get_state_synchronize_rcu() and cond_synchronize_rcu(), the return value from either get_state_synchronize_srcu() or start_poll_synchronize_srcu() must be passed in to a later call to poll_state_synchronize_srcu(). Link: https://lore.kernel.org/rcu/20201112201547.GF3365678@moria.home.lan/ Reported-by: Kent Overstreet [ paulmck: Add EXPORT_SYMBOL_GPL() per kernel test robot feedback. ] [ paulmck: Apply feedback from Neeraj Upadhyay. ] Link: https://lore.kernel.org/lkml/20201117004017.GA7444@paulmck-ThinkPad-P72/ Reviewed-by: Neeraj Upadhyay Signed-off-by: Paul E. 
McKenney --- kernel/rcu/srcutree.c | 67 ++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 63 insertions(+), 4 deletions(-) diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index 9a7b6505fdc8..c5d0c036fab5 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -810,7 +810,8 @@ static void srcu_leak_callback(struct rcu_head *rhp) /* * Start an SRCU grace period, and also queue the callback if non-NULL. */ -static void srcu_gp_start_if_needed(struct srcu_struct *ssp, struct rcu_head *rhp, bool do_norm) +static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp, + struct rcu_head *rhp, bool do_norm) { unsigned long flags; int idx; @@ -819,10 +820,12 @@ static void srcu_gp_start_if_needed(struct srcu_struct *ssp, struct rcu_head *rh unsigned long s; struct srcu_data *sdp; + check_init_srcu_struct(ssp); idx = srcu_read_lock(ssp); sdp = raw_cpu_ptr(ssp->sda); spin_lock_irqsave_rcu_node(sdp, flags); - rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp); + if (rhp) + rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp); rcu_segcblist_advance(&sdp->srcu_cblist, rcu_seq_current(&ssp->srcu_gp_seq)); s = rcu_seq_snap(&ssp->srcu_gp_seq); @@ -841,6 +844,7 @@ static void srcu_gp_start_if_needed(struct srcu_struct *ssp, struct rcu_head *rh else if (needexp) srcu_funnel_exp_start(ssp, sdp->mynode, s); srcu_read_unlock(ssp, idx); + return s; } /* @@ -874,7 +878,6 @@ static void srcu_gp_start_if_needed(struct srcu_struct *ssp, struct rcu_head *rh static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, rcu_callback_t func, bool do_norm) { - check_init_srcu_struct(ssp); if (debug_rcu_head_queue(rhp)) { /* Probable double call_srcu(), so leak the callback. */ WRITE_ONCE(rhp->func, srcu_leak_callback); @@ -882,7 +885,7 @@ static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, return; } rhp->func = func; - srcu_gp_start_if_needed(ssp, rhp, do_norm); + (void)srcu_gp_start_if_needed(ssp, rhp, do_norm); } /** @@ -1011,6 +1014,62 @@ void synchronize_srcu(struct srcu_struct *ssp) } EXPORT_SYMBOL_GPL(synchronize_srcu); +/** + * get_state_synchronize_srcu - Provide an end-of-grace-period cookie + * @ssp: srcu_struct to provide cookie for. + * + * This function returns a cookie that can be passed to + * poll_state_synchronize_srcu(), which will return true if a full grace + * period has elapsed in the meantime. It is the caller's responsibility + * to make sure that grace period happens, for example, by invoking + * call_srcu() after return from get_state_synchronize_srcu(). + */ +unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp) +{ + // Any prior manipulation of SRCU-protected data must happen + // before the load from ->srcu_gp_seq. + smp_mb(); + return rcu_seq_snap(&ssp->srcu_gp_seq); +} +EXPORT_SYMBOL_GPL(get_state_synchronize_srcu); + +/** + * start_poll_synchronize_srcu - Provide cookie and start grace period + * @ssp: srcu_struct to provide cookie for. + * + * This function returns a cookie that can be passed to + * poll_state_synchronize_srcu(), which will return true if a full grace + * period has elapsed in the meantime. Unlike get_state_synchronize_srcu(), + * this function also ensures that any needed SRCU grace period will be + * started. This convenience does come at a cost in terms of CPU overhead. 
+ */ +unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp) +{ + return srcu_gp_start_if_needed(ssp, NULL, true); +} +EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu); + +/** + * poll_state_synchronize_srcu - Has cookie's grace period ended? + * @ssp: srcu_struct to provide cookie for. + * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu(). + * + * This function takes the cookie that was returned from either + * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and + * returns @true if an SRCU grace period elapsed since the time that the + * cookie was created. + */ +bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie) +{ + if (!rcu_seq_done(&ssp->srcu_gp_seq, cookie)) + return false; + // Ensure that the end of the SRCU grace period happens before + // any subsequent code that the caller might execute. + smp_mb(); // ^^^ + return true; +} +EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu); + /* * Callback function for srcu_barrier() use. */ From ee7f4a87a18cd3bb141b38e2ef0c3e53253cdf63 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 18 Nov 2020 16:01:32 -0800 Subject: [PATCH 019/130] srcu: Document polling interfaces for Tree SRCU grace periods This commit adds requirements documentation for the get_state_synchronize_srcu(), start_poll_synchronize_srcu(), and poll_state_synchronize_srcu() functions. Link: https://lore.kernel.org/rcu/20201112201547.GF3365678@moria.home.lan/ Reported-by: Kent Overstreet Reviewed-by: Neeraj Upadhyay Signed-off-by: Paul E. McKenney --- .../RCU/Design/Requirements/Requirements.rst | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/Documentation/RCU/Design/Requirements/Requirements.rst b/Documentation/RCU/Design/Requirements/Requirements.rst index e8c84fcc0507..93a189ae8592 100644 --- a/Documentation/RCU/Design/Requirements/Requirements.rst +++ b/Documentation/RCU/Design/Requirements/Requirements.rst @@ -2600,6 +2600,24 @@ also includes ``DEFINE_SRCU()``, ``DEFINE_STATIC_SRCU()``, and ``init_srcu_struct()`` APIs for defining and initializing ``srcu_struct`` structures. +More recently, the SRCU API has added polling interfaces: + +#. start_poll_synchronize_srcu() returns a cookie identifying + the completion of a future SRCU grace period and ensures + that this grace period will be started. +#. poll_state_synchronize_srcu() returns ``true`` iff the + specified cookie corresponds to an already-completed + SRCU grace period. +#. get_state_synchronize_srcu() returns a cookie just like + start_poll_synchronize_srcu() does, but differs in that + it does nothing to ensure that any future SRCU grace period + will be started. + +These functions are used to avoid unnecessary SRCU grace periods in +certain types of buffer-cache algorithms having multi-stage age-out +mechanisms. The idea is that by the time the block has aged completely +from the cache, an SRCU grace period will be very likely to have elapsed. + Tasks RCU ~~~~~~~~~ From 4e7ccfae52b39aeee93ed39d4184d50ea201fbef Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sun, 15 Nov 2020 20:33:38 -0800 Subject: [PATCH 020/130] srcu: Add comment explaining cookie overflow/wrap This commit adds to the poll_state_synchronize_srcu() header comment describing the issues surrounding SRCU cookie overflow/wrap for the different kernel configurations. Link: https://lore.kernel.org/rcu/20201112201547.GF3365678@moria.home.lan/ Reported-by: Kent Overstreet Signed-off-by: Paul E. 
McKenney --- kernel/rcu/srcutree.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index c5d0c036fab5..119938d381e2 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -1058,6 +1058,21 @@ EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu); * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and * returns @true if an SRCU grace period elapsed since the time that the * cookie was created. + * + * Because cookies are finite in size, wrapping/overflow is possible. + * This is more pronounced on 32-bit systems where cookies are 32 bits, + * where in theory wrapping could happen in about 14 hours assuming + * 25-microsecond expedited SRCU grace periods. However, a more likely + * overflow lower bound is on the order of 24 days in the case of + * one-millisecond SRCU grace periods. Of course, wrapping in a 64-bit + * system requires geologic timespans, as in more than seven million years + * even for expedited SRCU grace periods. + * + * Wrapping/overflow is much more of an issue for CONFIG_SMP=n systems + * that also have CONFIG_PREEMPTION=n, which selects Tiny SRCU. This uses + * a 16-bit cookie, which rcutorture routinely wraps in a matter of a + * few minutes. If this proves to be a problem, this counter will be + * expanded to the same size as for Tree SRCU. */ bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie) { From fd56f64b4e3b9c53fbb12ef74c6f1f5fde4cc1c8 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 13 Nov 2020 20:14:27 -0800 Subject: [PATCH 021/130] rcutorture: Prepare for ->start_gp_poll and ->poll_gp_state The new get_state_synchronize_srcu(), start_poll_synchronize_srcu() and poll_state_synchronize_srcu() functions need to be tested, and so this commit prepares by renaming the rcu_torture_ops field ->get_state to ->get_gp_state in order to be consistent with the upcoming ->start_gp_poll and ->poll_gp_state fields. Link: https://lore.kernel.org/rcu/20201112201547.GF3365678@moria.home.lan/ Reported-by: Kent Overstreet Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 528ed10b78fd..bcea23ceceb6 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -311,7 +311,7 @@ struct rcu_torture_ops { void (*deferred_free)(struct rcu_torture *p); void (*sync)(void); void (*exp_sync)(void); - unsigned long (*get_state)(void); + unsigned long (*get_gp_state)(void); void (*cond_sync)(unsigned long oldstate); call_rcu_func_t call; void (*cb_barrier)(void); @@ -461,7 +461,7 @@ static struct rcu_torture_ops rcu_ops = { .deferred_free = rcu_torture_deferred_free, .sync = synchronize_rcu, .exp_sync = synchronize_rcu_expedited, - .get_state = get_state_synchronize_rcu, + .get_gp_state = get_state_synchronize_rcu, .cond_sync = cond_synchronize_rcu, .call = call_rcu, .cb_barrier = rcu_barrier, @@ -1050,10 +1050,10 @@ rcu_torture_writer(void *arg) /* Initialize synctype[] array. If none set, take default. 
*/ if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1) gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true; - if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) { + if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) { synctype[nsynctypes++] = RTWS_COND_GET; pr_info("%s: Testing conditional GPs.\n", __func__); - } else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) { + } else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) { pr_alert("%s: gp_cond without primitives.\n", __func__); } if (gp_exp1 && cur_ops->exp_sync) { @@ -1119,7 +1119,7 @@ rcu_torture_writer(void *arg) break; case RTWS_COND_GET: rcu_torture_writer_state = RTWS_COND_GET; - gp_snap = cur_ops->get_state(); + gp_snap = cur_ops->get_gp_state(); i = torture_random(&rand) % 16; if (i != 0) schedule_timeout_interruptible(i); From 0fd0548db13346bfb3bb23860ab270a32d6e385a Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 13 Nov 2020 20:43:59 -0800 Subject: [PATCH 022/130] rcutorture: Add writer-side tests of polling grace-period API This commit adds writer-side testing of the polling grace-period API. One test verifies that the polling API sees a grace period caused by some other mechanism. Another test verifies that using the polling API to wait for a grace period does not result in too-short grace periods. A third test verifies that the polling API does not report completion within a read-side critical section. A fourth and final test verifies that the polling API does report completion given an intervening grace period. Link: https://lore.kernel.org/rcu/20201112201547.GF3365678@moria.home.lan/ Reported-by: Kent Overstreet Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 79 +++++++++++++++++++++++++++++++++++++---- 1 file changed, 72 insertions(+), 7 deletions(-) diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index bcea23ceceb6..78ba95dfe05d 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -85,6 +85,7 @@ torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives"); torture_param(bool, gp_exp, false, "Use expedited GP wait primitives"); torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives"); +torture_param(bool, gp_poll, false, "Use polling GP wait primitives"); torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives"); torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers"); torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers"); @@ -183,9 +184,11 @@ static int rcu_torture_writer_state; #define RTWS_EXP_SYNC 4 #define RTWS_COND_GET 5 #define RTWS_COND_SYNC 6 -#define RTWS_SYNC 7 -#define RTWS_STUTTER 8 -#define RTWS_STOPPING 9 +#define RTWS_POLL_GET 7 +#define RTWS_POLL_WAIT 8 +#define RTWS_SYNC 9 +#define RTWS_STUTTER 10 +#define RTWS_STOPPING 11 static const char * const rcu_torture_writer_state_names[] = { "RTWS_FIXED_DELAY", "RTWS_DELAY", @@ -194,6 +197,8 @@ static const char * const rcu_torture_writer_state_names[] = { "RTWS_EXP_SYNC", "RTWS_COND_GET", "RTWS_COND_SYNC", + "RTWS_POLL_GET", + "RTWS_POLL_WAIT", "RTWS_SYNC", "RTWS_STUTTER", "RTWS_STOPPING", @@ -312,6 +317,8 @@ struct rcu_torture_ops { void (*sync)(void); void (*exp_sync)(void); unsigned long (*get_gp_state)(void); + unsigned long (*start_gp_poll)(void); + bool (*poll_gp_state)(unsigned long oldstate); void (*cond_sync)(unsigned long oldstate); call_rcu_func_t call; void (*cb_barrier)(void); @@ -570,6 +577,21 @@ static void srcu_torture_synchronize(void) 
synchronize_srcu(srcu_ctlp); } +static unsigned long srcu_torture_get_gp_state(void) +{ + return get_state_synchronize_srcu(srcu_ctlp); +} + +static unsigned long srcu_torture_start_gp_poll(void) +{ + return start_poll_synchronize_srcu(srcu_ctlp); +} + +static bool srcu_torture_poll_gp_state(unsigned long oldstate) +{ + return poll_state_synchronize_srcu(srcu_ctlp, oldstate); +} + static void srcu_torture_call(struct rcu_head *head, rcu_callback_t func) { @@ -601,6 +623,9 @@ static struct rcu_torture_ops srcu_ops = { .deferred_free = srcu_torture_deferred_free, .sync = srcu_torture_synchronize, .exp_sync = srcu_torture_synchronize_expedited, + .get_gp_state = srcu_torture_get_gp_state, + .start_gp_poll = srcu_torture_start_gp_poll, + .poll_gp_state = srcu_torture_poll_gp_state, .call = srcu_torture_call, .cb_barrier = srcu_torture_barrier, .stats = srcu_torture_stats, @@ -1027,18 +1052,20 @@ static int rcu_torture_writer(void *arg) { bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal(); + unsigned long cookie; int expediting = 0; unsigned long gp_snap; bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal; - bool gp_sync1 = gp_sync; + bool gp_poll1 = gp_poll, gp_sync1 = gp_sync; int i; + int idx; int oldnice = task_nice(current); struct rcu_torture *rp; struct rcu_torture *old_rp; static DEFINE_TORTURE_RANDOM(rand); bool stutter_waited; int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC, - RTWS_COND_GET, RTWS_SYNC }; + RTWS_COND_GET, RTWS_POLL_GET, RTWS_SYNC }; int nsynctypes = 0; VERBOSE_TOROUT_STRING("rcu_torture_writer task started"); @@ -1048,8 +1075,8 @@ rcu_torture_writer(void *arg) torture_type, cur_ops->name); /* Initialize synctype[] array. If none set, take default. */ - if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1) - gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true; + if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_poll1 && !gp_sync1) + gp_cond1 = gp_exp1 = gp_normal1 = gp_poll1 = gp_sync1 = true; if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) { synctype[nsynctypes++] = RTWS_COND_GET; pr_info("%s: Testing conditional GPs.\n", __func__); @@ -1068,6 +1095,12 @@ rcu_torture_writer(void *arg) } else if (gp_normal && !cur_ops->deferred_free) { pr_alert("%s: gp_normal without primitives.\n", __func__); } + if (gp_poll1 && cur_ops->start_gp_poll && cur_ops->poll_gp_state) { + synctype[nsynctypes++] = RTWS_POLL_GET; + pr_info("%s: Testing polling GPs.\n", __func__); + } else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) { + pr_alert("%s: gp_poll without primitives.\n", __func__); + } if (gp_sync1 && cur_ops->sync) { synctype[nsynctypes++] = RTWS_SYNC; pr_info("%s: Testing normal GPs.\n", __func__); @@ -1107,6 +1140,18 @@ rcu_torture_writer(void *arg) atomic_inc(&rcu_torture_wcount[i]); WRITE_ONCE(old_rp->rtort_pipe_count, old_rp->rtort_pipe_count + 1); + if (cur_ops->get_gp_state && cur_ops->poll_gp_state) { + idx = cur_ops->readlock(); + cookie = cur_ops->get_gp_state(); + WARN_ONCE(rcu_torture_writer_state != RTWS_DEF_FREE && + cur_ops->poll_gp_state(cookie), + "%s: Cookie check 1 failed %s(%d) %lu->%lu\n", + __func__, + rcu_torture_writer_state_getname(), + rcu_torture_writer_state, + cookie, cur_ops->get_gp_state()); + cur_ops->readunlock(idx); + } switch (synctype[torture_random(&rand) % nsynctypes]) { case RTWS_DEF_FREE: rcu_torture_writer_state = RTWS_DEF_FREE; @@ -1128,6 +1173,18 @@ rcu_torture_writer(void *arg) cur_ops->cond_sync(gp_snap); rcu_torture_pipe_update(old_rp); break; + case RTWS_POLL_GET: + 
rcu_torture_writer_state = RTWS_POLL_GET; + gp_snap = cur_ops->start_gp_poll(); + rcu_torture_writer_state = RTWS_POLL_WAIT; + while (!cur_ops->poll_gp_state(gp_snap)) { + i = torture_random(&rand) % 16; + if (i != 0) + schedule_timeout_interruptible(i); + udelay(torture_random(&rand) % 1000); + } + rcu_torture_pipe_update(old_rp); + break; case RTWS_SYNC: rcu_torture_writer_state = RTWS_SYNC; cur_ops->sync(); @@ -1137,6 +1194,14 @@ rcu_torture_writer(void *arg) WARN_ON_ONCE(1); break; } + if (cur_ops->get_gp_state && cur_ops->poll_gp_state) + WARN_ONCE(rcu_torture_writer_state != RTWS_DEF_FREE && + !cur_ops->poll_gp_state(cookie), + "%s: Cookie check 2 failed %s(%d) %lu->%lu\n", + __func__, + rcu_torture_writer_state_getname(), + rcu_torture_writer_state, + cookie, cur_ops->get_gp_state()); } WRITE_ONCE(rcu_torture_current_version, rcu_torture_current_version + 1); From bc480a6354ef2e15c26c3bdbd0db647026e788a7 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sun, 15 Nov 2020 12:45:57 -0800 Subject: [PATCH 023/130] rcutorture: Add reader-side tests of polling grace-period API This commit adds reader-side testing of the polling grace-period API. This testing verifies that a cookie obtained in an SRCU read-side critical section does not get a true return from poll_state_synchronize_srcu() within that same critical section. Link: https://lore.kernel.org/rcu/20201112201547.GF3365678@moria.home.lan/ Reported-by: Kent Overstreet Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 78ba95dfe05d..96d55f05e344 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1429,6 +1429,7 @@ rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp, */ static bool rcu_torture_one_read(struct torture_random_state *trsp) { + unsigned long cookie; int i; unsigned long started; unsigned long completed; @@ -1444,6 +1445,8 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp) WARN_ON_ONCE(!rcu_is_watching()); newstate = rcutorture_extend_mask(readstate, trsp); rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++); + if (cur_ops->get_gp_state && cur_ops->poll_gp_state) + cookie = cur_ops->get_gp_state(); started = cur_ops->get_gp_seq(); ts = rcu_trace_clock_local(); p = rcu_dereference_check(rcu_torture_current, @@ -1480,6 +1483,13 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp) } __this_cpu_inc(rcu_torture_batch[completed]); preempt_enable(); + if (cur_ops->get_gp_state && cur_ops->poll_gp_state) + WARN_ONCE(cur_ops->poll_gp_state(cookie), + "%s: Cookie check 3 failed %s(%d) %lu->%lu\n", + __func__, + rcu_torture_writer_state_getname(), + rcu_torture_writer_state, + cookie, cur_ops->get_gp_state()); rcutorture_one_extend(&readstate, 0, trsp, rtrsp); WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK); // This next splat is expected behavior if leakpointer, especially From 00504537f44422a99d97f615f2b3ee17cfba194d Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 29 Oct 2020 15:08:57 -0700 Subject: [PATCH 024/130] rcutorture: Add testing for RCU's global memory ordering RCU guarantees that anything seen by a given reader will also be seen after any grace period that must wait on that reader. This is very likely to hold based on inspection, but the advantage of having rcutorture do the inspecting is that rcutorture doesn't mind inspecting frequently and often. 
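For reference, a classic two-thread sketch of this class of guarantee, paraphrased from the RCU requirements documentation rather than taken from the check added by this patch (all names below are illustrative):

    #include <linux/rcupdate.h>

    static int x, y;                /* Both initially zero. */

    static void thread0(void)       /* Reader. */
    {
            rcu_read_lock();
            WRITE_ONCE(x, 1);
            WRITE_ONCE(y, 1);
            rcu_read_unlock();
    }

    static void thread1(void)       /* Updater. */
    {
            int r1, r2;

            r1 = READ_ONCE(x);
            synchronize_rcu();
            r2 = READ_ONCE(y);
            /*
             * Forbidden outcome: r1 == 1 && r2 == 0. If the pre-grace-period
             * load saw the reader's first store, the post-grace-period load
             * must also see the second store.
             */
    }

The new rcutorture check exercises the same idea at scale, using per-reader loop counters rather than the two variables shown here.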
This commit therefore adds code to test RCU's global memory ordering. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 98 ++++++++++++++++++++++++++++++++++++++--- 1 file changed, 92 insertions(+), 6 deletions(-) diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 96d55f05e344..338e1182b4b5 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -143,11 +143,22 @@ static struct task_struct *read_exit_task; #define RCU_TORTURE_PIPE_LEN 10 +// Mailbox-like structure to check RCU global memory ordering. +struct rcu_torture_reader_check { + unsigned long rtc_myloops; + int rtc_chkrdr; + unsigned long rtc_chkloops; + int rtc_ready; + struct rcu_torture_reader_check *rtc_assigner; +} ____cacheline_internodealigned_in_smp; + +// Update-side data structure used to check RCU readers. struct rcu_torture { struct rcu_head rtort_rcu; int rtort_pipe_count; struct list_head rtort_free; int rtort_mbtest; + struct rcu_torture_reader_check *rtort_chkp; }; static LIST_HEAD(rcu_torture_freelist); @@ -158,10 +169,13 @@ static DEFINE_SPINLOCK(rcu_torture_lock); static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count); static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch); static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; +static struct rcu_torture_reader_check *rcu_torture_reader_mbchk; static atomic_t n_rcu_torture_alloc; static atomic_t n_rcu_torture_alloc_fail; static atomic_t n_rcu_torture_free; static atomic_t n_rcu_torture_mberror; +static atomic_t n_rcu_torture_mbchk_fail; +static atomic_t n_rcu_torture_mbchk_tries; static atomic_t n_rcu_torture_error; static long n_rcu_torture_barrier_error; static long n_rcu_torture_boost_ktrerror; @@ -393,7 +407,12 @@ static bool rcu_torture_pipe_update_one(struct rcu_torture *rp) { int i; + struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp); + if (rtrcp) { + WRITE_ONCE(rp->rtort_chkp, NULL); + smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire(). + } i = READ_ONCE(rp->rtort_pipe_count); if (i > RCU_TORTURE_PIPE_LEN) i = RCU_TORTURE_PIPE_LEN; @@ -1292,6 +1311,62 @@ static void rcu_torture_timer_cb(struct rcu_head *rhp) kfree(rhp); } +// Set up and carry out testing of RCU's global memory ordering +static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp, + struct torture_random_state *trsp) +{ + unsigned long loops; + int noc = num_online_cpus(); + int rdrchked; + int rdrchker; + struct rcu_torture_reader_check *rtrcp; // Me. + struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking. + struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked. + struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me. + + if (myid < 0) + return; // Don't try this from timer handlers. + + // Increment my counter. + rtrcp = &rcu_torture_reader_mbchk[myid]; + WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1); + + // Attempt to assign someone else some checking work. + rdrchked = torture_random(trsp) % nrealreaders; + rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; + rdrchker = torture_random(trsp) % nrealreaders; + rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker]; + if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker && + smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below. + !READ_ONCE(rtp->rtort_chkp) && + !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below. 
+ rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops); + WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0); + rtrcp->rtc_chkrdr = rdrchked; + WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends. + if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) || + cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp)) + (void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out. + } + + // If assigned some completed work, do it! + rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner); + if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready)) + return; // No work or work not yet ready. + rdrchked = rtrcp_assigner->rtc_chkrdr; + if (WARN_ON_ONCE(rdrchked < 0)) + return; + rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; + loops = READ_ONCE(rtrcp_chked->rtc_myloops); + atomic_inc(&n_rcu_torture_mbchk_tries); + if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops)) + atomic_inc(&n_rcu_torture_mbchk_fail); + rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2; + rtrcp_assigner->rtc_ready = 0; + smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work. + smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign. +} + /* * Do one extension of an RCU read-side critical section using the * current reader state in readstate (set to zero for initial entry @@ -1427,7 +1502,7 @@ rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp, * no data to read. Can be invoked both from process context and * from a timer handler. */ -static bool rcu_torture_one_read(struct torture_random_state *trsp) +static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid) { unsigned long cookie; int i; @@ -1462,6 +1537,7 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp) } if (p->rtort_mbtest == 0) atomic_inc(&n_rcu_torture_mberror); + rcu_torture_reader_do_mbchk(myid, p, trsp); rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp); preempt_disable(); pipe_count = READ_ONCE(p->rtort_pipe_count); @@ -1518,7 +1594,7 @@ static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand); static void rcu_torture_timer(struct timer_list *unused) { atomic_long_inc(&n_rcu_torture_timers); - (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand)); + (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1); /* Test call_rcu() invocation from interrupt handler. 
*/ if (cur_ops->call) { @@ -1554,7 +1630,7 @@ rcu_torture_reader(void *arg) if (!timer_pending(&t)) mod_timer(&t, jiffies + 1); } - if (!rcu_torture_one_read(&rand) && !torture_must_stop()) + if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop()) schedule_timeout_interruptible(HZ); if (time_after(jiffies, lastsleep) && !torture_must_stop()) { schedule_timeout_interruptible(1); @@ -1614,8 +1690,9 @@ rcu_torture_stats_print(void) atomic_read(&n_rcu_torture_alloc), atomic_read(&n_rcu_torture_alloc_fail), atomic_read(&n_rcu_torture_free)); - pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ", + pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld rtbre: %ld ", atomic_read(&n_rcu_torture_mberror), + atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries), n_rcu_torture_barrier_error, n_rcu_torture_boost_ktrerror, n_rcu_torture_boost_rterror); @@ -1632,12 +1709,14 @@ rcu_torture_stats_print(void) pr_alert("%s%s ", torture_type, TORTURE_FLAG); if (atomic_read(&n_rcu_torture_mberror) || + atomic_read(&n_rcu_torture_mbchk_fail) || n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror || n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure || i > 1) { pr_cont("%s", "!!! "); atomic_inc(&n_rcu_torture_error); WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror)); + WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail)); WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier() WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio @@ -2467,7 +2546,7 @@ static int rcu_torture_read_exit_child(void *trsp_in) // Minimize time between reading and exiting. while (!kthread_should_stop()) schedule_timeout_uninterruptible(1); - (void)rcu_torture_one_read(trsp); + (void)rcu_torture_one_read(trsp, -1); return 0; } @@ -2582,6 +2661,8 @@ rcu_torture_cleanup(void) kfree(reader_tasks); reader_tasks = NULL; } + kfree(rcu_torture_reader_mbchk); + rcu_torture_reader_mbchk = NULL; if (fakewriter_tasks) { for (i = 0; i < nfakewriters; i++) @@ -2785,6 +2866,8 @@ rcu_torture_init(void) atomic_set(&n_rcu_torture_alloc_fail, 0); atomic_set(&n_rcu_torture_free, 0); atomic_set(&n_rcu_torture_mberror, 0); + atomic_set(&n_rcu_torture_mbchk_fail, 0); + atomic_set(&n_rcu_torture_mbchk_tries, 0); atomic_set(&n_rcu_torture_error, 0); n_rcu_torture_barrier_error = 0; n_rcu_torture_boost_ktrerror = 0; @@ -2826,12 +2909,15 @@ rcu_torture_init(void) } reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]), GFP_KERNEL); - if (reader_tasks == NULL) { + rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk), + GFP_KERNEL); + if (!reader_tasks || !rcu_torture_reader_mbchk) { VERBOSE_TOROUT_ERRSTRING("out of memory"); firsterr = -ENOMEM; goto unwind; } for (i = 0; i < nrealreaders; i++) { + rcu_torture_reader_mbchk[i].rtc_chkrdr = -1; firsterr = torture_create_kthread(rcu_torture_reader, (void *)i, reader_tasks[i]); if (firsterr) From f3ea978b712f768a02137e867aced5bfdcea670e Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 11 Nov 2020 10:12:05 -0800 Subject: [PATCH 025/130] scftorture: Add debug output for wrong-CPU warning This commit adds the desired CPU, the actual CPU, and nr_cpu_ids to the wrong-CPU warning in scftorture_invoker(), the better to help with debugging. Signed-off-by: Paul E. 
McKenney --- kernel/scftorture.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/kernel/scftorture.c b/kernel/scftorture.c index d55a9f8cda3d..2377cbb32474 100644 --- a/kernel/scftorture.c +++ b/kernel/scftorture.c @@ -398,6 +398,7 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra static int scftorture_invoker(void *arg) { int cpu; + int curcpu; DEFINE_TORTURE_RANDOM(rand); struct scf_statistics *scfp = (struct scf_statistics *)arg; bool was_offline = false; @@ -412,7 +413,10 @@ static int scftorture_invoker(void *arg) VERBOSE_SCFTORTOUT("scftorture_invoker %d: Waiting for all SCF torturers from cpu %d", scfp->cpu, smp_processor_id()); // Make sure that the CPU is affinitized appropriately during testing. - WARN_ON_ONCE(smp_processor_id() != scfp->cpu); + curcpu = smp_processor_id(); + WARN_ONCE(curcpu != scfp->cpu % nr_cpu_ids, + "%s: Wanted CPU %d, running on %d, nr_cpu_ids = %d\n", + __func__, scfp->cpu, curcpu, nr_cpu_ids); if (!atomic_dec_return(&n_started)) while (atomic_read_acquire(&n_started)) { From b08ea1de6a8f8929c7dafd6f708799365fa90c11 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 6 Nov 2020 13:52:31 -0800 Subject: [PATCH 026/130] rcu: Mark obtuse portion of stall warning as internal debug There is a rather obtuse string that can be printed as part of an expedited RCU CPU stall-warning message that starts with "blocking rcu_node structures". Under normal conditions, most of this message is just repeating the list of CPUs blocking the current expedited grace period, but in a manner that is rather difficult to read. This commit therefore marks this message as "(internal RCU debug)" in an effort to give people the option of avoiding wasting time attempting to extract nonexistent additional meaning from this portion of the message. Reported-by: Jonathan Lemon Signed-off-by: Paul E. McKenney --- kernel/rcu/tree_exp.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 8760b6ead770..6c6ff06d4ae6 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -545,7 +545,7 @@ static void synchronize_rcu_expedited_wait(void) data_race(rnp_root->expmask), ".T"[!!data_race(rnp_root->exp_tasks)]); if (ndetected) { - pr_err("blocking rcu_node structures:"); + pr_err("blocking rcu_node structures (internal RCU debug):"); rcu_for_each_node_breadth_first(rnp) { if (rnp == rnp_root) continue; /* printed unconditionally */ From 243027a3c80564bf96e40437ffac46efb9f5f2b5 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 11 Nov 2020 16:08:01 -0800 Subject: [PATCH 027/130] rcu: For RCU grace-period kthread starvation, dump last CPU it ran on When the RCU CPU stall-warning code detects that the RCU grace-period kthread is being starved, it dumps that kthread's stack. This can sometimes be useful, but it is also useful to know what is running on the CPU that this kthread is attempting to run on. This commit therefore adds a stack trace of this CPU in order to help track down whatever it is that might be preventing RCU's grace-period kthread from running. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree_stall.h | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h index 70d48c52fabc..35c1355d44e7 100644 --- a/kernel/rcu/tree_stall.h +++ b/kernel/rcu/tree_stall.h @@ -449,20 +449,27 @@ static void print_cpu_stall_info(int cpu) /* Complain about starvation of grace-period kthread. 
*/ static void rcu_check_gp_kthread_starvation(void) { + int cpu; struct task_struct *gpk = rcu_state.gp_kthread; unsigned long j; if (rcu_is_gp_kthread_starving(&j)) { + cpu = gpk ? task_cpu(gpk) : -1; pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n", rcu_state.name, j, (long)rcu_seq_current(&rcu_state.gp_seq), data_race(rcu_state.gp_flags), gp_state_getname(rcu_state.gp_state), rcu_state.gp_state, - gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1); + gpk ? gpk->state : ~0, cpu); if (gpk) { pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name); pr_err("RCU grace-period kthread stack dump:\n"); sched_show_task(gpk); + if (cpu >= 0) { + pr_err("Stack dump where RCU grace-period kthread last ran:\n"); + if (!trigger_single_cpu_backtrace(cpu)) + dump_cpu_task(cpu); + } wake_up_process(gpk); } } From 725969ac11d7fa50aa701321daa600ce421fc21b Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 12 Nov 2020 12:19:47 -0800 Subject: [PATCH 028/130] rcu: Do not NMI offline CPUs Currently, RCU CPU stall warning messages will NMI whatever CPU looks like it is blocking either the current grace period or the grace-period kthread. This can produce confusing output if the target CPU is offline. This commit therefore checks for offline CPUs. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree_stall.h | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h index 35c1355d44e7..29cf096b5d20 100644 --- a/kernel/rcu/tree_stall.h +++ b/kernel/rcu/tree_stall.h @@ -333,9 +333,12 @@ static void rcu_dump_cpu_stacks(void) rcu_for_each_leaf_node(rnp) { raw_spin_lock_irqsave_rcu_node(rnp, flags); for_each_leaf_node_possible_cpu(rnp, cpu) - if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) - if (!trigger_single_cpu_backtrace(cpu)) + if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) { + if (cpu_is_offline(cpu)) + pr_err("Offline CPU %d blocking current GP.\n", cpu); + else if (!trigger_single_cpu_backtrace(cpu)) dump_cpu_task(cpu); + } raw_spin_unlock_irqrestore_rcu_node(rnp, flags); } } @@ -466,9 +469,13 @@ static void rcu_check_gp_kthread_starvation(void) pr_err("RCU grace-period kthread stack dump:\n"); sched_show_task(gpk); if (cpu >= 0) { - pr_err("Stack dump where RCU grace-period kthread last ran:\n"); - if (!trigger_single_cpu_backtrace(cpu)) - dump_cpu_task(cpu); + if (cpu_is_offline(cpu)) { + pr_err("RCU GP kthread last ran on offline CPU %d.\n", cpu); + } else { + pr_err("Stack dump where RCU GP kthread last ran:\n"); + if (!trigger_single_cpu_backtrace(cpu)) + dump_cpu_task(cpu); + } } wake_up_process(gpk); } From 0682aa7acd5d2688a8b781d91938e21ae4717c52 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 5 Nov 2020 20:01:46 -0800 Subject: [PATCH 029/130] torture: Make --kcsan specify lockdep The --kcsan argument to kvm.sh adds CONFIG_KCSAN_VERBOSE=y in order to get more detail from the KCSAN reports. However, this Kconfig option requires lockdep to be enabled. This commit therefore causes --kcsan to also enable lockdep. Signed-off-by: Paul E. 
McKenney --- tools/testing/selftests/rcutorture/bin/kvm.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh index 45d07b7b69f5..bd07df70b9c9 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm.sh @@ -169,7 +169,7 @@ do TORTURE_KCONFIG_KASAN_ARG="CONFIG_DEBUG_INFO=y CONFIG_KASAN=y"; export TORTURE_KCONFIG_KASAN_ARG ;; --kcsan) - TORTURE_KCONFIG_KCSAN_ARG="CONFIG_DEBUG_INFO=y CONFIG_KCSAN=y CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY=n CONFIG_KCSAN_REPORT_ONCE_IN_MS=100000 CONFIG_KCSAN_VERBOSE=y CONFIG_KCSAN_INTERRUPT_WATCHER=y"; export TORTURE_KCONFIG_KCSAN_ARG + TORTURE_KCONFIG_KCSAN_ARG="CONFIG_DEBUG_INFO=y CONFIG_KCSAN=y CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY=n CONFIG_KCSAN_REPORT_ONCE_IN_MS=100000 CONFIG_KCSAN_INTERRUPT_WATCHER=y CONFIG_KCSAN_VERBOSE=y CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_PROVE_LOCKING=y"; export TORTURE_KCONFIG_KCSAN_ARG ;; --kmake-arg|--kmake-args) checkarg --kmake-arg "(kernel make arguments)" $# "$2" '.*' '^error$' From 1f947be7f9696fca36e67f0897bc239b4755ae55 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sun, 8 Nov 2020 15:38:26 -0800 Subject: [PATCH 030/130] torture: Make kvm.sh "--dryrun sched" summarize number of batches Knowing the number of batches that kvm.sh will split a run into allows estimation of the duration of a test, give or take the number of builds. This commit therefore adds a line of output to "--dryrun sched" that gives the number of batches that will be run. Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/bin/kvm.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh index bd07df70b9c9..1078be1bd0ad 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm.sh @@ -536,6 +536,8 @@ then egrep 'Start batch|Starting build\.' $T/script | grep -v ">>" | sed -e 's/:.*$//' -e 's/^echo //' + nbatches="`grep 'Start batch' $T/script | grep -v ">>" | wc -l`" + echo Total number of batches: $nbatches exit 0 else # Not a dryrun, so run the script. From eca0501a7a2036d3e63aae80cf7f2594408374ff Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sun, 8 Nov 2020 15:52:30 -0800 Subject: [PATCH 031/130] torture: Make kvm.sh "--dryrun sched" summarize number of builds Knowing the number of builds that kvm.sh will split a run into allows estimation of the duration of a test, give or take build duration. This commit therefore adds a line of output to "--dryrun sched" that gives the number of builds that will be run. This excludes "builds" for repeated scenarios that reuse an earlier build. Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/bin/kvm.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh index 1078be1bd0ad..55a18a93239f 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm.sh @@ -536,6 +536,10 @@ then egrep 'Start batch|Starting build\.' $T/script | grep -v ">>" | sed -e 's/:.*$//' -e 's/^echo //' + nbuilds="`grep 'Starting build\.' $T/script | + grep -v ">>" | sed -e 's/:.*$//' -e 's/^echo //' | + awk '{ print $1 }' | grep -v '\.' 
| wc -l`" + echo Total number of builds: $nbuilds nbatches="`grep 'Start batch' $T/script | grep -v ">>" | wc -l`" echo Total number of batches: $nbatches exit 0 From bc4073587067f2128b422f260fedd9fe0a8f7c4e Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 11 Nov 2020 11:09:17 -0800 Subject: [PATCH 032/130] torture: Allow kvm.sh --datestamp to specify subdirectories Scripts like kvm-check-branches.sh group runs under a single directory in resdir in order to allow easier retrospective analysis. However, they do this by letting kvm.sh create a directory as usual and then moving it after the run. This can be very confusing when looking at the results while kvm-check-branches.sh is running. This commit therefore enables --datestamp to hand subdirectories to kvm.sh. Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/bin/kvm.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh index 55a18a93239f..0a9211a447a1 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm.sh @@ -113,7 +113,7 @@ do shift ;; --datestamp) - checkarg --datestamp "(relative pathname)" "$#" "$2" '^[^/]*$' '^--' + checkarg --datestamp "(relative pathname)" "$#" "$2" '^[a-zA-Z0-9._-/]*$' '^--' ds=$2 shift ;; @@ -375,7 +375,7 @@ if ! test -e $resdir then mkdir -p "$resdir" || : fi -mkdir $resdir/$ds +mkdir -p $resdir/$ds TORTURE_RESDIR="$resdir/$ds"; export TORTURE_RESDIR TORTURE_STOPFILE="$resdir/$ds/STOP"; export TORTURE_STOPFILE echo Results directory: $resdir/$ds From 315957cad445aa80e567983a43d9bb2a24a8534d Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 17 Nov 2020 16:28:18 -0800 Subject: [PATCH 033/130] torture: Prepare for splitting qemu execution from kvm-test-1-run.sh Distributed execution of rcutorture is eased if the qemu execution can be split from the building of the kernel, as this allows target systems to be used that are not set up to build kernels. It also avoids issues with toolchain version skew across the cluster, aside of course from qemu and KVM version skew. This commit therefore records needed data as comments in the qemu-cmd file and moves recording of the starting time to just before qemu is launched. Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh index 3cd03d01857c..4bc0e620edd0 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh @@ -125,7 +125,6 @@ seconds=$4 qemu_args=$5 boot_args=$6 -kstarttime=`gawk 'BEGIN { print systime() }' < /dev/null` if test -z "$TORTURE_BUILDONLY" then echo ' ---' `date`: Starting kernel @@ -158,6 +157,8 @@ then boot_args="$boot_args $TORTURE_BOOT_GDB_ARG" fi echo $QEMU $qemu_args -m $TORTURE_QEMU_MEM -kernel $KERNEL -append \"$qemu_append $boot_args\" $TORTURE_QEMU_GDB_ARG > $resdir/qemu-cmd +echo "# TORTURE_SHUTDOWN_GRACE=$TORTURE_SHUTDOWN_GRACE" >> $resdir/qemu-cmd +echo "# seconds=$seconds" >> $resdir/qemu-cmd if test -n "$TORTURE_BUILDONLY" then @@ -174,6 +175,7 @@ echo 'echo $! 
> $resdir/qemu_pid' >> $T/qemu-cmd echo "NOTE: $QEMU either did not run or was interactive" > $resdir/console.log # Attempt to run qemu +kstarttime=`gawk 'BEGIN { print systime() }' < /dev/null` ( . $T/qemu-cmd; wait `cat $resdir/qemu_pid`; echo $? > $resdir/qemu-retval ) & commandcompleted=0 if test -z "$TORTURE_KCONFIG_GDB_ARG" From d4a945e260b9eb59b1a90b9d6f2b0b953e27f803 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 17 Nov 2020 17:38:48 -0800 Subject: [PATCH 034/130] torture: Add config2csv.sh script to compare torture scenarios This commit adds a config2csv.sh script that converts the specified torture-test scenarios' Kconfig options and kernel-boot parameters to .csv format. This allows easier comparison of scenarios when one fails and another does not. Signed-off-by: Paul E. McKenney --- .../selftests/rcutorture/bin/config2csv.sh | 67 +++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100755 tools/testing/selftests/rcutorture/bin/config2csv.sh diff --git a/tools/testing/selftests/rcutorture/bin/config2csv.sh b/tools/testing/selftests/rcutorture/bin/config2csv.sh new file mode 100755 index 000000000000..d5a16631b16e --- /dev/null +++ b/tools/testing/selftests/rcutorture/bin/config2csv.sh @@ -0,0 +1,67 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0+ +# +# Create a spreadsheet from torture-test Kconfig options and kernel boot +# parameters. Run this in the directory containing the scenario files. +# +# Usage: config2csv path.csv [ "scenario1 scenario2 ..." ] +# +# By default, this script will take the list of scenarios from the CFLIST +# file in that directory, otherwise it will consider only the scenarios +# specified on the command line. It will examine each scenario's file +# and also its .boot file, if present, and create a column in the .csv +# output file. Note that "CFLIST" is a synonym for all the scenarios in the +# CFLIST file, which allows easy comparison of those scenarios with selected +# scenarios such as BUSTED that are normally omitted from CFLIST files. + +csvout=${1} +if test -z "$csvout" +then + echo "Need .csv output file as first argument." 
+ exit 1 +fi +shift +defaultconfigs="`tr '\012' ' ' < CFLIST`" +if test "$#" -eq 0 +then + scenariosarg=$defaultconfigs +else + scenariosarg=$* +fi +scenarios="`echo $scenariosarg | sed -e "s/\/$defaultconfigs/g"`" + +T=/tmp/config2latex.sh.$$ +trap 'rm -rf $T' 0 +mkdir $T + +cat << '---EOF---' >> $T/p.awk +END { +---EOF--- +for i in $scenarios +do + echo ' s["'$i'"] = 1;' >> $T/p.awk + grep -v '^#' < $i | grep -v '^ *$' > $T/p + if test -r $i.boot + then + tr -s ' ' '\012' < $i.boot | grep -v '^#' >> $T/p + fi + sed -e 's/^[^=]*$/&=?/' < $T/p | + sed -e 's/^\([^=]*\)=\(.*\)$/\tp["\1:'"$i"'"] = "\2";\n\tc["\1"] = 1;/' >> $T/p.awk +done +cat << '---EOF---' >> $T/p.awk + ns = asorti(s, ss); + nc = asorti(c, cs); + for (j = 1; j <= ns; j++) + printf ",\"%s\"", ss[j]; + printf "\n"; + for (i = 1; i <= nc; i++) { + printf "\"%s\"", cs[i]; + for (j = 1; j <= ns; j++) { + printf ",\"%s\"", p[cs[i] ":" ss[j]]; + } + printf "\n"; + } +} +---EOF--- +awk -f $T/p.awk < /dev/null > $T/p.csv +cp $T/p.csv $csvout From 106cc0d9e79aa7fcb43bd8feab97ee6e114d348b Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Thu, 19 Nov 2020 01:30:24 +0100 Subject: [PATCH 035/130] tools/rcutorture: Make identify_qemu_vcpus() independent of local language MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The rcutorture scripts' identify_qemu_vcpus() function expects `lscpu` to have a "CPU: " line, for example: CPU(s): 8 But different local language settings can give different results: Processeur(s) : 8 As a result, identify_qemu_vcpus() may return an empty string, resulting in the following warning (with the same local language settings): kvm-test-1-run.sh: ligne 138 : test: : nombre entier attendu comme expression This commit therefore changes identify_qemu_vcpus() to use getconf, which produces local-language-independend output. Cc: Josh Triplett Cc: Steven Rostedt Cc: Mathieu Desnoyers Cc: Lai Jiangshan Cc: rcu@vger.kernel.org Signed-off-by: Frederic Weisbecker Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/bin/functions.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/rcutorture/bin/functions.sh b/tools/testing/selftests/rcutorture/bin/functions.sh index 82663495fb38..fef8b4b55c27 100644 --- a/tools/testing/selftests/rcutorture/bin/functions.sh +++ b/tools/testing/selftests/rcutorture/bin/functions.sh @@ -232,7 +232,7 @@ identify_qemu_args () { # Returns the number of virtual CPUs available to the aggregate of the # guest OSes. identify_qemu_vcpus () { - lscpu | grep '^CPU(s):' | sed -e 's/CPU(s)://' -e 's/[ ]*//g' + getconf _NPROCESSORS_ONLN } # print_bug From cb212767346ceba58c8b7bfdbbf45339b86e09c0 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 19 Nov 2020 15:23:04 -0800 Subject: [PATCH 036/130] torture: Make kvm.sh "Test Summary" date be end of test Currently, the "date" command producing the output on the kvm.sh "Test Summary" line is executed at the beginning of the test, which produces a date that is less than helpful to someone wanting to know the duration of the test. This commit therefore defers this command's execution to the end of the test. Signed-off-by: Paul E. 
McKenney --- tools/testing/selftests/rcutorture/bin/kvm.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh index 0a9211a447a1..c8356d508252 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm.sh @@ -517,10 +517,12 @@ END { dump(first, i, batchnum); }' >> $T/script -cat << ___EOF___ >> $T/script +cat << '___EOF___' >> $T/script echo echo echo " --- `date` Test summary:" +___EOF___ +cat << ___EOF___ >> $T/script echo Results directory: $resdir/$ds kcsan-collapse.sh $resdir/$ds kvm-recheck.sh $resdir/$ds From 452613719eeea36de8ab13388a704fccb9d572dd Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 20 Nov 2020 20:09:55 -0800 Subject: [PATCH 037/130] torture: Make kvm.sh arguments accumulate Given that kvm.sh in invoked from scripts, it is only natural for different levels of scripting to provide their own Kconfig option values, for example. Unfortunately, right now, the last such argument on the command line wins. This commit therefore makes the --bootargs, --configs, --kconfigs, --kmake-args, and --qemu-args argument values accumulate. For example, where "--configs TREE01 --configs TREE02" would previously have run only scenario TREE02, now it will run both scenarios. Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/bin/kvm.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh index c8356d508252..6fd7ef7726a5 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm.sh @@ -85,7 +85,7 @@ do ;; --bootargs|--bootarg) checkarg --bootargs "(list of kernel boot arguments)" "$#" "$2" '.*' '^--' - TORTURE_BOOTARGS="$2" + TORTURE_BOOTARGS="$TORTURE_BOOTARGS $2" shift ;; --bootimage) @@ -97,8 +97,8 @@ do TORTURE_BUILDONLY=1 ;; --configs|--config) - checkarg --configs "(list of config files)" "$#" "$2" '^[^/]*$' '^--' - configs="$2" + checkarg --configs "(list of config files)" "$#" "$2" '^[^/]\+$' '^--' + configs="$configs $2" shift ;; --cpus) @@ -162,7 +162,7 @@ do ;; --kconfig|--kconfigs) checkarg --kconfig "(Kconfig options)" $# "$2" '^CONFIG_[A-Z0-9_]\+=\([ynm]\|[0-9]\+\)\( CONFIG_[A-Z0-9_]\+=\([ynm]\|[0-9]\+\)\)*$' '^error$' - TORTURE_KCONFIG_ARG="$2" + TORTURE_KCONFIG_ARG="`echo "$TORTURE_KCONFIG_ARG $2" | sed -e 's/^ *//' -e 's/ *$//'`" shift ;; --kasan) @@ -173,7 +173,7 @@ do ;; --kmake-arg|--kmake-args) checkarg --kmake-arg "(kernel make arguments)" $# "$2" '.*' '^error$' - TORTURE_KMAKE_ARG="$2" + TORTURE_KMAKE_ARG="`echo "$TORTURE_KMAKE_ARG $2" | sed -e 's/^ *//' -e 's/ *$//'`" shift ;; --mac) @@ -191,7 +191,7 @@ do ;; --qemu-args|--qemu-arg) checkarg --qemu-args "(qemu arguments)" $# "$2" '^-' '^error' - TORTURE_QEMU_ARG="$2" + TORTURE_QEMU_ARG="`echo "$TORTURE_QEMU_ARG $2" | sed -e 's/^ *//' -e 's/ *$//'`" shift ;; --qemu-cmd) From 0bcca18348cfde8e59b77cdf6f3e278289a16e67 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sun, 22 Nov 2020 09:55:34 -0800 Subject: [PATCH 038/130] torture: Print run duration at end of kvm.sh execution Yes, you can mentally subtract the timestamps, but this commit makes the computer do this work. Signed-off-by: Paul E. 
McKenney --- .../selftests/rcutorture/bin/functions.sh | 33 +++++++++++++++++++ tools/testing/selftests/rcutorture/bin/kvm.sh | 6 ++++ 2 files changed, 39 insertions(+) diff --git a/tools/testing/selftests/rcutorture/bin/functions.sh b/tools/testing/selftests/rcutorture/bin/functions.sh index fef8b4b55c27..97c3a1764858 100644 --- a/tools/testing/selftests/rcutorture/bin/functions.sh +++ b/tools/testing/selftests/rcutorture/bin/functions.sh @@ -108,6 +108,39 @@ configfrag_hotplug_cpu () { grep -q '^CONFIG_HOTPLUG_CPU=y$' "$1" } +# get_starttime +# +# Returns a cookie identifying the current time. +get_starttime () { + awk 'BEGIN { print systime() }' < /dev/null +} + +# get_starttime_duration starttime +# +# Given the return value from get_starttime, compute a human-readable +# string denoting the time since get_starttime. +get_starttime_duration () { + awk -v starttime=$1 ' + BEGIN { + ts = systime() - starttime; + tm = int(ts / 60); + th = int(ts / 3600); + td = int(ts / 86400); + d = td; + h = th - td * 24; + m = tm - th * 60; + s = ts - tm * 60; + if (d >= 1) + printf "%dd %d:%02d:%02d\n", d, h, m, s + else if (h >= 1) + printf "%d:%02d:%02d\n", h, m, s + else if (m >= 1) + printf "%d:%02d.0\n", m, s + else + print s " seconds" + }' < /dev/null +} + # identify_boot_image qemu-cmd # # Returns the relative path to the kernel build image. This will be diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh index 6fd7ef7726a5..6f2126815e22 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm.sh @@ -47,6 +47,9 @@ cpus=0 ds=`date +%Y.%m.%d-%H.%M.%S` jitter="-1" +startdate="`date`" +starttime="`get_starttime`" + usage () { echo "Usage: $scriptname optional arguments:" echo " --allcpus" @@ -548,6 +551,9 @@ then else # Not a dryrun, so run the script. sh $T/script + ret=$? + echo " --- Done at `date` (`get_starttime_duration $starttime`)" + exit $ret fi # Tracing: trace_event=rcu:rcu_grace_period,rcu:rcu_future_grace_period,rcu:rcu_grace_period_init,rcu:rcu_nocb_wake,rcu:rcu_preempt_task,rcu:rcu_unlock_preempted_task,rcu:rcu_quiescent_state_report,rcu:rcu_fqs,rcu:rcu_callback,rcu:rcu_kfree_callback,rcu:rcu_batch_start,rcu:rcu_invoke_callback,rcu:rcu_invoke_kfree_callback,rcu:rcu_batch_end,rcu:rcu_torture_read,rcu:rcu_barrier From 23239fc075d60a942101227c42353b5ced804269 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 23 Nov 2020 10:41:57 -0800 Subject: [PATCH 039/130] torture: Make kvm.sh return failure upon build failure The kvm.sh script uses kvm-find-errors.sh to evaluate whether or not a build failed. Unfortunately, kvm-find-errors.sh returns success if there are no failed runs (including when there are no runs at all) even if there are build failures. This commit therefore makes kvm-find-errors.sh return failure in response to build failures. Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh b/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh index 6f50722f251f..be265987fa9d 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh @@ -39,6 +39,7 @@ done if test -n "$files" then $editor $files + editorret=1 else echo No build errors. fi @@ -62,5 +63,10 @@ then exit 1 else echo No errors in console logs. 
- exit 0 + if test -n "$editorret" + then + exit $editorret + else + exit 0 + fi fi From 22bf64cc94832a3b047a1412a4ad0f7d9bd6cd8b Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 24 Nov 2020 13:12:13 -0800 Subject: [PATCH 040/130] torture: Make kvm.sh include --kconfig arguments in CPU calculation Currently, passing something like "--kconfig CONFIG_NR_CPUS=2" to kvm.sh has no effect on scenario scheduling. For scenarios that do not specify the number of CPUs, this can result in kvm.sh wastefully scheduling only one scenario at a time even when the --kconfig argument would allow a number to be run concurrently. This commit therefore makes kvm.sh consider the --kconfig arguments when scheduling scenarios across the available CPUs. Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/bin/kvm.sh | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh index 6f2126815e22..472929cb8312 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm.sh @@ -290,7 +290,13 @@ for CF1 in $configs_derep do if test -f "$CONFIGFRAG/$CF1" then - cpu_count=`configNR_CPUS.sh $CONFIGFRAG/$CF1` + if echo "$TORTURE_KCONFIG_ARG" | grep -q '\ $T/KCONFIG_ARG + cpu_count=`configNR_CPUS.sh $T/KCONFIG_ARG` + else + cpu_count=`configNR_CPUS.sh $CONFIGFRAG/$CF1` + fi cpu_count=`configfrag_boot_cpus "$TORTURE_BOOTARGS" "$CONFIGFRAG/$CF1" "$cpu_count"` cpu_count=`configfrag_boot_maxcpus "$TORTURE_BOOTARGS" "$CONFIGFRAG/$CF1" "$cpu_count"` echo $CF1 $cpu_count >> $T/cfgcpu From 0beb394878a46bad6358f81dde2ef4aa0ef68af5 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 26 Nov 2020 15:27:57 -0800 Subject: [PATCH 041/130] torture: Add kvm.sh test summary to end of log file This commit adds the test summary to the end of the log in the top-level directory containing the kvm.sh test artifacts. While in the area, it adds the kvm.sh exit code to this test summary. Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/bin/kvm.sh | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh index 472929cb8312..667896f9c0d4 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm.sh @@ -527,15 +527,18 @@ END { }' >> $T/script cat << '___EOF___' >> $T/script -echo -echo -echo " --- `date` Test summary:" +echo | tee -a $TORTURE_RESDIR/log +echo | tee -a $TORTURE_RESDIR/log +echo " --- `date` Test summary:" | tee -a $TORTURE_RESDIR/log ___EOF___ cat << ___EOF___ >> $T/script -echo Results directory: $resdir/$ds -kcsan-collapse.sh $resdir/$ds -kvm-recheck.sh $resdir/$ds +echo Results directory: $resdir/$ds | tee -a $resdir/$ds/log +kcsan-collapse.sh $resdir/$ds | tee -a $resdir/$ds/log +kvm-recheck.sh $resdir/$ds > $T/kvm-recheck.sh.out 2>&1 ___EOF___ +echo 'ret=$?' >> $T/script +echo "cat $T/kvm-recheck.sh.out | tee -a $resdir/$ds/log" >> $T/script +echo 'exit $ret' >> $T/script if test "$dryrun" = script then @@ -556,9 +559,9 @@ then exit 0 else # Not a dryrun, so run the script. - sh $T/script + bash $T/script ret=$? 
- echo " --- Done at `date` (`get_starttime_duration $starttime`)" + echo " --- Done at `date` (`get_starttime_duration $starttime`) exitcode $ret" | tee -a $resdir/$ds/log exit $ret fi From f716348f29d30e8ef3a1ceed3fea19490aba4fe4 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 3 Dec 2020 13:27:42 -0800 Subject: [PATCH 042/130] torture: Stop hanging on panic By default, the "panic" kernel parameter is zero, which causes the kernel to loop indefinitely after a panic(). The rcutorture scripting will eventually kill the corresponding qemu process, but only after waiting for the full run duration plus a few minutes. This works, but delays notifying the developer of the failure. This commit therefore causes the rcutorture scripting to pass the "panic=-1" kernel parameter, which caused the kernel to instead unceremoniously shut down immediately. This in turn causes qemu to terminate, so that if all of the runs in a given batch panic(), the rcutorture scripting can immediately proceed to the next batch. Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/bin/functions.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/testing/selftests/rcutorture/bin/functions.sh b/tools/testing/selftests/rcutorture/bin/functions.sh index 97c3a1764858..c35ba24f994c 100644 --- a/tools/testing/selftests/rcutorture/bin/functions.sh +++ b/tools/testing/selftests/rcutorture/bin/functions.sh @@ -203,6 +203,7 @@ identify_qemu () { # and the TORTURE_QEMU_INTERACTIVE environment variable. identify_qemu_append () { echo debug_boot_weak_hash + echo panic=-1 local console=ttyS0 case "$1" in qemu-system-x86_64|qemu-system-i386) From 755cf0afc16477bf55c837a35bf3b15461850194 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 11 Dec 2020 16:26:50 -0800 Subject: [PATCH 043/130] torture: Add --dryrun batches to help schedule a distributed run When all of the remote systems have the same number of CPUs, one approach is to use one "--buildonly" run and one "--dryrun sched" run, and then distributing the batches out one per remote system. However, the output of "--dryrun sched" is not made for parsing, so this commit adds a "--dryrun batches" that provides the same information in easily parsed form. Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/bin/kvm.sh | 22 ++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh index 667896f9c0d4..6b900360db07 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm.sh @@ -60,7 +60,7 @@ usage () { echo " --cpus N" echo " --datestamp string" echo " --defconfig string" - echo " --dryrun sched|script" + echo " --dryrun batches|sched|script" echo " --duration minutes | s | h | d" echo " --gdb" echo " --help" @@ -126,7 +126,7 @@ do shift ;; --dryrun) - checkarg --dryrun "sched|script" $# "$2" 'sched\|script' '^--' + checkarg --dryrun "batches|sched|script" $# "$2" 'batches\|sched\|script' '^--' dryrun=$2 shift ;; @@ -235,7 +235,7 @@ do shift done -if test -z "$TORTURE_INITRD" || tools/testing/selftests/rcutorture/bin/mkinitrd.sh +if test -n "$dryrun" || test -z "$TORTURE_INITRD" || tools/testing/selftests/rcutorture/bin/mkinitrd.sh then : else @@ -547,8 +547,7 @@ then elif test "$dryrun" = sched then # Extract the test run schedule from the script. - egrep 'Start batch|Starting build\.' $T/script | - grep -v ">>" | + egrep 'Start batch|Starting build\.' 
$T/script | grep -v ">>" | sed -e 's/:.*$//' -e 's/^echo //' nbuilds="`grep 'Starting build\.' $T/script | grep -v ">>" | sed -e 's/:.*$//' -e 's/^echo //' | @@ -557,6 +556,19 @@ then nbatches="`grep 'Start batch' $T/script | grep -v ">>" | wc -l`" echo Total number of batches: $nbatches exit 0 +elif test "$dryrun" = batches +then + # Extract the tests and their batches from the script. + egrep 'Start batch|Starting build\.' $T/script | grep -v ">>" | + sed -e 's/:.*$//' -e 's/^echo //' -e 's/-ovf//' | + awk ' + /^----Start/ { + batchno = $3; + next; + } + { + print batchno, $1, $2 + }' else # Not a dryrun, so run the script. bash $T/script From c821f855f625f763a87c49f413aa4f60974b5071 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 11 Dec 2020 16:59:40 -0800 Subject: [PATCH 044/130] torture: s/STOP/STOP.1/ to avoid scenario collision This commit changes the "STOP" file that is used to cleanly halt a running rcutorture run to "STOP.1" because no scenario directory will ever end with ".1". If there really was a scenario named "STOP", its directories would instead be named "STOP", "STOP.2", "STOP.3", and so on. While in the area, the commit also changes the kernel-run-time checks for this file to look directly in the directory above $resdir, thus avoiding the need to pass the TORTURE_STOPFILE environment variable to remote systems. While in the area, move the STOP.1 file to the top-level directory covering all of the scenarios. Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh | 8 ++++---- tools/testing/selftests/rcutorture/bin/kvm.sh | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh index 4bc0e620edd0..536d103ef166 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh @@ -211,7 +211,7 @@ do if test -n "$TORTURE_KCONFIG_GDB_ARG" then : - elif test $kruntime -ge $seconds || test -f "$TORTURE_STOPFILE" + elif test $kruntime -ge $seconds || test -f "$resdir/../STOP.1" then break; fi @@ -254,16 +254,16 @@ then fi if test $commandcompleted -eq 0 -a -n "$qemu_pid" then - if ! test -f "$TORTURE_STOPFILE" + if ! test -f "$resdir/../STOP.1" then echo Grace period for qemu job at pid $qemu_pid fi oldline="`tail $resdir/console.log`" while : do - if test -f "$TORTURE_STOPFILE" + if test -f "$resdir/../STOP.1" then - echo "PID $qemu_pid killed due to run STOP request" >> $resdir/Warnings 2>&1 + echo "PID $qemu_pid killed due to run STOP.1 request" >> $resdir/Warnings 2>&1 kill -KILL $qemu_pid break fi diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh index 6b900360db07..605186888fd9 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm.sh @@ -386,7 +386,7 @@ then fi mkdir -p $resdir/$ds TORTURE_RESDIR="$resdir/$ds"; export TORTURE_RESDIR -TORTURE_STOPFILE="$resdir/$ds/STOP"; export TORTURE_STOPFILE +TORTURE_STOPFILE="$resdir/$ds/STOP.1"; export TORTURE_STOPFILE echo Results directory: $resdir/$ds echo $scriptname $args touch $resdir/$ds/log From 365dc5cb62c8714e27554e44464f6e0e9c1fdbdf Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sun, 20 Dec 2020 16:52:29 -0800 Subject: [PATCH 045/130] torture: Simplify exit-code plumbing for kvm-recheck.sh and kvm-find-errors.sh This commit simplifies exit-code plumbing. 
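As described below, kvm-recheck.sh will now distinguish build errors from runtime errors via its exit status. For illustration only (this sketch is not part of the patch, and the invocation details and messages are assumptions), a calling script could consume that status roughly as follows:

        kvm-recheck.sh "$resdir/$ds"    # results directory of the run being checked
        ret=$?
        case $ret in
        0) echo "No build or runtime errors found." ;;
        1) echo "At least one scenario failed to build." ;;
        2) echo "At least one scenario encountered runtime errors." ;;
        esac
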
It makes kvm-recheck.sh return the value 1 for a build error and 2 for a runtime error. It also makes kvm-find-errors.sh avoid checking runtime files for --build-only runs. Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh | 1 + tools/testing/selftests/rcutorture/bin/kvm-recheck.sh | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh b/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh index be265987fa9d..0670841122d8 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm-find-errors.sh @@ -46,6 +46,7 @@ fi if grep -q -e "--buildonly" < ${rundir}/log then echo Build-only run, no console logs to check. + exit $editorret fi # Find console logs with errors diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh index 840a4679a0d7..47cf4db10896 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh @@ -87,15 +87,16 @@ do fi done EDITOR=echo kvm-find-errors.sh "${@: -1}" > $T 2>&1 -ret=$? builderrors="`tr ' ' '\012' < $T | grep -c '/Make.out.diags'`" if test "$builderrors" -gt 0 then echo $builderrors runs with build errors. + ret=1 fi runerrors="`tr ' ' '\012' < $T | grep -c '/console.log.diags'`" if test "$runerrors" -gt 0 then echo $runerrors runs with runtime errors. + ret=2 fi exit $ret From 546eee2d931b3d76357a9c813778203001375fe1 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 23 Dec 2020 10:35:39 -0800 Subject: [PATCH 046/130] torture: Remove "Failed to add ttynull console" false positive Commit 757055ae8ded ("init/console: Use ttynull as a fallback when there is no console") results in the string "Warning: Failed to add ttynull console. No stdin, stdout, and stderr for the init process!" appearing on the console, which the rcutorture scripting interprets as a warning, which causes every rcutorture run to be flagged. However, the rcutorture init process never attempts to do any I/O, and thus does not care that it has no stdin, stdout, or stderr. This commit therefore causes the rcutorture scripting to ignore this message. Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/bin/console-badness.sh | 1 + tools/testing/selftests/rcutorture/bin/parse-console.sh | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/rcutorture/bin/console-badness.sh b/tools/testing/selftests/rcutorture/bin/console-badness.sh index 80ae7f08b363..e6a132df6172 100755 --- a/tools/testing/selftests/rcutorture/bin/console-badness.sh +++ b/tools/testing/selftests/rcutorture/bin/console-badness.sh @@ -14,4 +14,5 @@ egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:|detected stalls o grep -v 'ODEBUG: ' | grep -v 'This means that this is a DEBUG kernel and it is' | grep -v 'Warning: unable to open an initial console' | +grep -v 'Warning: Failed to add ttynull console. No stdin, stdout, and stderr.*the init process!' 
| grep -v 'NOHZ tick-stop error: Non-RCU local softirq work is pending, handler' diff --git a/tools/testing/selftests/rcutorture/bin/parse-console.sh b/tools/testing/selftests/rcutorture/bin/parse-console.sh index 263b1be50008..9f624bd53c27 100755 --- a/tools/testing/selftests/rcutorture/bin/parse-console.sh +++ b/tools/testing/selftests/rcutorture/bin/parse-console.sh @@ -128,7 +128,7 @@ then then summary="$summary Badness: $n_badness" fi - n_warn=`grep -v 'Warning: unable to open an initial console' $file | egrep -c 'WARNING:|Warn'` + n_warn=`grep -v 'Warning: unable to open an initial console' $file | grep -v 'Warning: Failed to add ttynull console. No stdin, stdout, and stderr for the init process' | egrep -c 'WARNING:|Warn'` if test "$n_warn" -ne 0 then summary="$summary Warnings: $n_warn" From b79b0b67791316e6ca0502bd0f2ecd7018d6d9e8 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 23 Dec 2020 16:00:27 -0800 Subject: [PATCH 047/130] torture: Allow standalone kvm-recheck.sh run detect --trust-make Normally, kvm-recheck.sh is run from kvm.sh, which provides the TORTURE_TRUST_MAKE environment variable that, if a non-empty string, indicates that the --trust-make command-line parameter has been passed to kvm.sh. If there was no --trust-make, kvm-recheck.sh insists that the Make.out file contain at least one "CC" command. Thus, when kvm-recheck.sh is run standalone to evaluate a prior --trust-make run, it will incorrectly insist that a proper kernel build did not happen. This commit therefore causes kvm-recheck.sh to also search the "log" file in the top-level results directory for the string "--trust-make". Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/bin/parse-build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/rcutorture/bin/parse-build.sh b/tools/testing/selftests/rcutorture/bin/parse-build.sh index 09155c15ea65..9313e5065ae9 100755 --- a/tools/testing/selftests/rcutorture/bin/parse-build.sh +++ b/tools/testing/selftests/rcutorture/bin/parse-build.sh @@ -21,7 +21,7 @@ mkdir $T . functions.sh -if grep -q CC < $F || test -n "$TORTURE_TRUST_MAKE" +if grep -q CC < $F || test -n "$TORTURE_TRUST_MAKE" || grep -qe --trust-make < `dirname $F`/../log then : else From 1b7af295541d75535374325fd617944534853919 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sat, 29 Aug 2020 10:22:24 -0700 Subject: [PATCH 048/130] sched/core: Allow try_invoke_on_locked_down_task() with irqs disabled The try_invoke_on_locked_down_task() function currently requires that interrupts be enabled, but it is called with interrupts disabled from rcu_print_task_stall(), resulting in an "IRQs not enabled as expected" diagnostic. This commit therefore updates try_invoke_on_locked_down_task() to use raw_spin_lock_irqsave() instead of raw_spin_lock_irq(), thus allowing use from either context. Link: https://lore.kernel.org/lkml/000000000000903d5805ab908fc4@google.com/ Link: https://lore.kernel.org/lkml/20200928075729.GC2611@hirez.programming.kicks-ass.net/ Reported-by: syzbot+cb3b69ae80afd6535b0e@syzkaller.appspotmail.com Signed-off-by: Peter Zijlstra Signed-off-by: Paul E. 
McKenney --- kernel/sched/core.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index e7e453492cff..f768bb0df74b 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2989,7 +2989,7 @@ out: /** * try_invoke_on_locked_down_task - Invoke a function on task in fixed state - * @p: Process for which the function is to be invoked. + * @p: Process for which the function is to be invoked, can be @current. * @func: Function to invoke. * @arg: Argument to function. * @@ -3007,12 +3007,11 @@ out: */ bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg) { - bool ret = false; struct rq_flags rf; + bool ret = false; struct rq *rq; - lockdep_assert_irqs_enabled(); - raw_spin_lock_irq(&p->pi_lock); + raw_spin_lock_irqsave(&p->pi_lock, rf.flags); if (p->on_rq) { rq = __task_rq_lock(p, &rf); if (task_rq(p) == rq) @@ -3029,7 +3028,7 @@ bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct t ret = func(p, arg); } } - raw_spin_unlock_irq(&p->pi_lock); + raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags); return ret; } From c5586e32dfe258925c5dbb599bea3eadf34e79c1 Mon Sep 17 00:00:00 2001 From: Wang Qing Date: Sat, 7 Nov 2020 16:24:03 +0800 Subject: [PATCH 049/130] locking: Remove duplicate include of percpu-rwsem.h This commit removes an unnecessary #include. Signed-off-by: Wang Qing Signed-off-by: Paul E. McKenney --- kernel/locking/locktorture.c | 1 - 1 file changed, 1 deletion(-) diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c index 62d215b2e39f..af99e9ca285a 100644 --- a/kernel/locking/locktorture.c +++ b/kernel/locking/locktorture.c @@ -27,7 +27,6 @@ #include #include #include -#include #include MODULE_LICENSE("GPL"); From a649d25dcc671a33b9cc3176411920fdc5fbd98e Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 19 Nov 2020 10:13:06 -0800 Subject: [PATCH 050/130] rcu: Add lockdep_assert_irqs_disabled() to rcu_sched_clock_irq() and callees This commit adds a number of lockdep_assert_irqs_disabled() calls to rcu_sched_clock_irq() and a number of the functions that it calls. The point of this is to help track down a situation where lockdep appears to be insisting that interrupts are enabled within these functions, which should only ever be invoked from the scheduling-clock interrupt handler. Link: https://lore.kernel.org/lkml/20201111133813.GA81547@elver.google.com/ Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 4 ++++ kernel/rcu/tree_plugin.h | 1 + kernel/rcu/tree_stall.h | 8 ++++++++ 3 files changed, 13 insertions(+) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index bd04b09b84b3..f70634f7c3aa 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2553,6 +2553,7 @@ static void rcu_do_batch(struct rcu_data *rdp) void rcu_sched_clock_irq(int user) { trace_rcu_utilization(TPS("Start scheduler-tick")); + lockdep_assert_irqs_disabled(); raw_cpu_inc(rcu_data.ticks_this_gp); /* The load-acquire pairs with the store-release setting to true. 
*/ if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) { @@ -2566,6 +2567,7 @@ void rcu_sched_clock_irq(int user) rcu_flavor_sched_clock_irq(user); if (rcu_pending(user)) invoke_rcu_core(); + lockdep_assert_irqs_disabled(); trace_rcu_utilization(TPS("End scheduler-tick")); } @@ -3690,6 +3692,8 @@ static int rcu_pending(int user) struct rcu_data *rdp = this_cpu_ptr(&rcu_data); struct rcu_node *rnp = rdp->mynode; + lockdep_assert_irqs_disabled(); + /* Check for CPU stalls, if enabled. */ check_cpu_stall(rdp); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index fd8a52e9a887..cb76e70da8ca 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -682,6 +682,7 @@ static void rcu_flavor_sched_clock_irq(int user) { struct task_struct *t = current; + lockdep_assert_irqs_disabled(); if (user || rcu_is_cpu_rrupt_from_idle()) { rcu_note_voluntary_context_switch(current); } diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h index ca21d28a0f98..4024dcc78aac 100644 --- a/kernel/rcu/tree_stall.h +++ b/kernel/rcu/tree_stall.h @@ -260,6 +260,7 @@ static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags) struct task_struct *t; struct task_struct *ts[8]; + lockdep_assert_irqs_disabled(); if (!rcu_preempt_blocked_readers_cgp(rnp)) return 0; pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):", @@ -284,6 +285,7 @@ static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags) ".q"[rscr.rs.b.need_qs], ".e"[rscr.rs.b.exp_hint], ".l"[rscr.on_blkd_list]); + lockdep_assert_irqs_disabled(); put_task_struct(t); ndetected++; } @@ -472,6 +474,8 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps) struct rcu_node *rnp; long totqlen = 0; + lockdep_assert_irqs_disabled(); + /* Kick and suppress, if so configured. */ rcu_stall_kick_kthreads(); if (rcu_stall_is_suppressed()) @@ -493,6 +497,7 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps) } } ndetected += rcu_print_task_stall(rnp, flags); // Releases rnp->lock. + lockdep_assert_irqs_disabled(); } for_each_possible_cpu(cpu) @@ -538,6 +543,8 @@ static void print_cpu_stall(unsigned long gps) struct rcu_node *rnp = rcu_get_root(); long totqlen = 0; + lockdep_assert_irqs_disabled(); + /* Kick and suppress, if so configured. */ rcu_stall_kick_kthreads(); if (rcu_stall_is_suppressed()) @@ -592,6 +599,7 @@ static void check_cpu_stall(struct rcu_data *rdp) unsigned long js; struct rcu_node *rnp; + lockdep_assert_irqs_disabled(); if ((rcu_stall_is_suppressed() && !READ_ONCE(rcu_kick_kthreads)) || !rcu_gp_in_progress()) return; From 7dffe01765d9309b8bd5505503933ec0ec53d192 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 19 Nov 2020 13:30:33 -0800 Subject: [PATCH 051/130] rcu: Add lockdep_assert_irqs_disabled() to raw_spin_unlock_rcu_node() macros This commit adds a lockdep_assert_irqs_disabled() call to the helper macros that release the rcu_node structure's ->lock, namely to raw_spin_unlock_rcu_node(), raw_spin_unlock_irq_rcu_node() and raw_spin_unlock_irqrestore_rcu_node(). The point of this is to help track down a situation where lockdep appears to be insisting that interrupts are enabled while holding an rcu_node structure's ->lock. Link: https://lore.kernel.org/lkml/20201111133813.GA81547@elver.google.com/ Signed-off-by: Paul E. 
McKenney --- kernel/rcu/rcu.h | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h index e01cba5e4b52..839f5be65244 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -378,7 +378,11 @@ do { \ smp_mb__after_unlock_lock(); \ } while (0) -#define raw_spin_unlock_rcu_node(p) raw_spin_unlock(&ACCESS_PRIVATE(p, lock)) +#define raw_spin_unlock_rcu_node(p) \ +do { \ + lockdep_assert_irqs_disabled(); \ + raw_spin_unlock(&ACCESS_PRIVATE(p, lock)); \ +} while (0) #define raw_spin_lock_irq_rcu_node(p) \ do { \ @@ -387,7 +391,10 @@ do { \ } while (0) #define raw_spin_unlock_irq_rcu_node(p) \ - raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock)) +do { \ + lockdep_assert_irqs_disabled(); \ + raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock)); \ +} while (0) #define raw_spin_lock_irqsave_rcu_node(p, flags) \ do { \ @@ -396,7 +403,10 @@ do { \ } while (0) #define raw_spin_unlock_irqrestore_rcu_node(p, flags) \ - raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags) +do { \ + lockdep_assert_irqs_disabled(); \ + raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags); \ +} while (0) #define raw_spin_trylock_rcu_node(p) \ ({ \ From bfba7ed084f8ab0269a5a1d2f51b07865456c334 Mon Sep 17 00:00:00 2001 From: "Uladzislau Rezki (Sony)" Date: Wed, 9 Dec 2020 21:27:32 +0100 Subject: [PATCH 052/130] rcu-tasks: Add RCU-tasks self tests This commit adds self tests for early-boot use of RCU-tasks grace periods. It tests all three variants (Rude, Tasks, and Tasks Trace) and covers both synchronous (e.g., synchronize_rcu_tasks()) and asynchronous (e.g., call_rcu_tasks()) grace-period APIs. Self-tests are run only in kernels built with CONFIG_PROVE_RCU=y. Signed-off-by: Uladzislau Rezki (Sony) [ paulmck: Handle CONFIG_PROVE_RCU=n and identify test cases' callbacks. ] Signed-off-by: Paul E. McKenney --- kernel/rcu/tasks.h | 79 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 79 insertions(+) diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 73bbe792fe1e..74767d365752 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -1231,6 +1231,82 @@ void show_rcu_tasks_gp_kthreads(void) } #endif /* #ifndef CONFIG_TINY_RCU */ +#ifdef CONFIG_PROVE_RCU +struct rcu_tasks_test_desc { + struct rcu_head rh; + const char *name; + bool notrun; +}; + +static struct rcu_tasks_test_desc tests[] = { + { + .name = "call_rcu_tasks()", + /* If not defined, the test is skipped. */ + .notrun = !IS_ENABLED(CONFIG_TASKS_RCU), + }, + { + .name = "call_rcu_tasks_rude()", + /* If not defined, the test is skipped. */ + .notrun = !IS_ENABLED(CONFIG_TASKS_RUDE_RCU), + }, + { + .name = "call_rcu_tasks_trace()", + /* If not defined, the test is skipped. 
*/ + .notrun = !IS_ENABLED(CONFIG_TASKS_TRACE_RCU) + } +}; + +static void test_rcu_tasks_callback(struct rcu_head *rhp) +{ + struct rcu_tasks_test_desc *rttd = + container_of(rhp, struct rcu_tasks_test_desc, rh); + + pr_info("Callback from %s invoked.\n", rttd->name); + + rttd->notrun = true; +} + +static void rcu_tasks_initiate_self_tests(void) +{ + pr_info("Running RCU-tasks wait API self tests\n"); +#ifdef CONFIG_TASKS_RCU + synchronize_rcu_tasks(); + call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback); +#endif + +#ifdef CONFIG_TASKS_RUDE_RCU + synchronize_rcu_tasks_rude(); + call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback); +#endif + +#ifdef CONFIG_TASKS_TRACE_RCU + synchronize_rcu_tasks_trace(); + call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback); +#endif +} + +static int rcu_tasks_verify_self_tests(void) +{ + int ret = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + if (!tests[i].notrun) { // still hanging. + pr_err("%s has been failed.\n", tests[i].name); + ret = -1; + } + } + + if (ret) + WARN_ON(1); + + return ret; +} +late_initcall(rcu_tasks_verify_self_tests); +#else /* #ifdef CONFIG_PROVE_RCU */ +static void rcu_tasks_initiate_self_tests(void) { } +#endif /* #else #ifdef CONFIG_PROVE_RCU */ + void __init rcu_init_tasks_generic(void) { #ifdef CONFIG_TASKS_RCU @@ -1244,6 +1320,9 @@ void __init rcu_init_tasks_generic(void) #ifdef CONFIG_TASKS_TRACE_RCU rcu_spawn_tasks_trace_kthread(); #endif + + // Run the self-tests. + rcu_tasks_initiate_self_tests(); } #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ From c26165efac41bce0c7764262b21f5897e771f34f Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 21 Dec 2020 21:00:18 -0800 Subject: [PATCH 053/130] rcu: Make TASKS_TRACE_RCU select IRQ_WORK Tasks Trace RCU uses irq_work_queue() to safely awaken its grace-period kthread, so this commit therefore causes the TASKS_TRACE_RCU Kconfig option select the IRQ_WORK Kconfig option. Reported-by: kernel test robot Acked-by: Randy Dunlap # build-tested Signed-off-by: Paul E. McKenney --- kernel/rcu/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig index b71e21f73c40..84dfa8dae1b2 100644 --- a/kernel/rcu/Kconfig +++ b/kernel/rcu/Kconfig @@ -95,6 +95,7 @@ config TASKS_RUDE_RCU config TASKS_TRACE_RCU def_bool 0 + select IRQ_WORK help This option enables a task-based RCU implementation that uses explicit rcu_read_lock_trace() read-side markers, and allows From 1120281713a5c8d9caffaa49db11fd0a25e34ef0 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 24 Dec 2020 15:28:14 -0800 Subject: [PATCH 054/130] torture: Do Kconfig analysis only once per scenario Currently, if a scenario is repeated as in "--configs '4*TREE01'", the Kconfig analysis is performed for each occurrance (four times in this example) and each analysis places the exact same data into the exact same files. This is not really an issue in this repetition-four example, but it can needlessly consume tens of seconds of wallclock time for something like "--config '128*TINY01'". This commit therefore does Kconfig analysis only once per set of repeats of a given scenario, courtesy of the "sort -u" command and an automatically generated awk script. While in the area, this commit also wordsmiths a comment. Signed-off-by: Paul E. 
McKenney --- tools/testing/selftests/rcutorture/bin/kvm.sh | 22 +++++++++++++------ 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh index 605186888fd9..8d3c99b35e06 100755 --- a/tools/testing/selftests/rcutorture/bin/kvm.sh +++ b/tools/testing/selftests/rcutorture/bin/kvm.sh @@ -286,7 +286,8 @@ then exit 1 fi fi -for CF1 in $configs_derep +echo 'BEGIN {' > $T/cfgcpu.awk +for CF1 in `echo $configs_derep | tr -s ' ' '\012' | sort -u` do if test -f "$CONFIGFRAG/$CF1" then @@ -299,12 +300,20 @@ do fi cpu_count=`configfrag_boot_cpus "$TORTURE_BOOTARGS" "$CONFIGFRAG/$CF1" "$cpu_count"` cpu_count=`configfrag_boot_maxcpus "$TORTURE_BOOTARGS" "$CONFIGFRAG/$CF1" "$cpu_count"` - echo $CF1 $cpu_count >> $T/cfgcpu + echo 'scenariocpu["'"$CF1"'"] = '"$cpu_count"';' >> $T/cfgcpu.awk else echo "The --configs file $CF1 does not exist, terminating." exit 1 fi done +cat << '___EOF___' >> $T/cfgcpu.awk +} +{ + for (i = 1; i <= NF; i++) + print $i, scenariocpu[$i]; +} +___EOF___ +echo $configs_derep | awk -f $T/cfgcpu.awk > $T/cfgcpu sort -k2nr $T/cfgcpu -T="$T" > $T/cfgcpu.sort # Use a greedy bin-packing algorithm, sorting the list accordingly. @@ -324,11 +333,10 @@ END { batch = 0; nc = -1; - # Each pass through the following loop creates on test batch - # that can be executed concurrently given ncpus. Note that a - # given test that requires more than the available CPUs will run in - # their own batch. Such tests just have to make do with what - # is available. + # Each pass through the following loop creates on test batch that + # can be executed concurrently given ncpus. Note that a given test + # that requires more than the available CPUs will run in its own + # batch. Such tests just have to make do with what is available. while (nc != ncpus) { batch++; nc = ncpus; From 361c0f3d80dc3b54c20a19e8ffa2ad728fc1d23d Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 15 Dec 2020 15:16:48 +0100 Subject: [PATCH 055/130] doc: Update RCU's requirements page about the PREEMPT_RT wiki The PREEMPT_RT wiki moved from kernel.org to the Linux Foundation wiki. The kernel.org wiki is read only. This commit therefore updates the URL of the active PREEMPT_RT wiki. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Paul E. McKenney --- Documentation/RCU/Design/Requirements/Requirements.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/RCU/Design/Requirements/Requirements.rst b/Documentation/RCU/Design/Requirements/Requirements.rst index 65c7839114a5..bac1cdde57d1 100644 --- a/Documentation/RCU/Design/Requirements/Requirements.rst +++ b/Documentation/RCU/Design/Requirements/Requirements.rst @@ -2317,7 +2317,7 @@ decides to throw at it. The Linux kernel is used for real-time workloads, especially in conjunction with the `-rt -patchset `__. The +patchset `__. The real-time-latency response requirements are such that the traditional approach of disabling preemption across RCU read-side critical sections is inappropriate. Kernels built with ``CONFIG_PREEMPT=y`` therefore use From 81ad58be2f83f9bd675f67ca5b8f420358ddf13c Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 15 Dec 2020 15:16:49 +0100 Subject: [PATCH 056/130] doc: Use CONFIG_PREEMPTION CONFIG_PREEMPTION is selected by CONFIG_PREEMPT and by CONFIG_PREEMPT_RT. Both PREEMPT and PREEMPT_RT require the same functionality which today depends on CONFIG_PREEMPT. 
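For reference, the select relationships just described look roughly like the following abridged sketch of kernel/Kconfig.preempt (shown for illustration only, not part of this patch; the entries are paraphrased and details vary between kernel versions):

        config PREEMPT
                bool "Preemptible Kernel (Low-Latency Desktop)"
                select PREEMPTION

        config PREEMPT_RT
                bool "Fully Preemptible Kernel (Real-Time)"
                select PREEMPTION

        config PREEMPTION
                bool
                select PREEMPT_COUNT
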
Update the documents and mention CONFIG_PREEMPTION. Spell out CONFIG_PREEMPT_RT (instead PREEMPT_RT) since it is an option now. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Paul E. McKenney --- .../Expedited-Grace-Periods.rst | 4 ++-- .../RCU/Design/Requirements/Requirements.rst | 22 +++++++++---------- Documentation/RCU/checklist.rst | 2 +- Documentation/RCU/rcubarrier.rst | 6 ++--- Documentation/RCU/stallwarn.rst | 4 ++-- Documentation/RCU/whatisRCU.rst | 10 ++++----- 6 files changed, 24 insertions(+), 24 deletions(-) diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.rst b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.rst index 72f0f6fbd53c..6f89cf1e567d 100644 --- a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.rst +++ b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.rst @@ -38,7 +38,7 @@ sections. RCU-preempt Expedited Grace Periods =================================== -``CONFIG_PREEMPT=y`` kernels implement RCU-preempt. +``CONFIG_PREEMPTION=y`` kernels implement RCU-preempt. The overall flow of the handling of a given CPU by an RCU-preempt expedited grace period is shown in the following diagram: @@ -112,7 +112,7 @@ things. RCU-sched Expedited Grace Periods --------------------------------- -``CONFIG_PREEMPT=n`` kernels implement RCU-sched. The overall flow of +``CONFIG_PREEMPTION=n`` kernels implement RCU-sched. The overall flow of the handling of a given CPU by an RCU-sched expedited grace period is shown in the following diagram: diff --git a/Documentation/RCU/Design/Requirements/Requirements.rst b/Documentation/RCU/Design/Requirements/Requirements.rst index bac1cdde57d1..42a81e30619e 100644 --- a/Documentation/RCU/Design/Requirements/Requirements.rst +++ b/Documentation/RCU/Design/Requirements/Requirements.rst @@ -78,7 +78,7 @@ RCU treats a nested set as one big RCU read-side critical section. Production-quality implementations of rcu_read_lock() and rcu_read_unlock() are extremely lightweight, and in fact have exactly zero overhead in Linux kernels built for production use with -``CONFIG_PREEMPT=n``. +``CONFIG_PREEMPTION=n``. This guarantee allows ordering to be enforced with extremely low overhead to readers, for example: @@ -1181,7 +1181,7 @@ and has become decreasingly so as memory sizes have expanded and memory costs have plummeted. However, as I learned from Matt Mackall's `bloatwatch `__ efforts, memory footprint is critically important on single-CPU systems with -non-preemptible (``CONFIG_PREEMPT=n``) kernels, and thus `tiny +non-preemptible (``CONFIG_PREEMPTION=n``) kernels, and thus `tiny RCU `__ was born. Josh Triplett has since taken over the small-memory banner with his `Linux kernel tinification `__ @@ -1497,7 +1497,7 @@ limitations. Implementations of RCU for which rcu_read_lock() and rcu_read_unlock() generate no code, such as Linux-kernel RCU when -``CONFIG_PREEMPT=n``, can be nested arbitrarily deeply. After all, there +``CONFIG_PREEMPTION=n``, can be nested arbitrarily deeply. After all, there is no overhead. Except that if all these instances of rcu_read_lock() and rcu_read_unlock() are visible to the compiler, compilation will eventually fail due to exhausting memory, @@ -1769,7 +1769,7 @@ implementation can be a no-op. However, once the scheduler has spawned its first kthread, this early boot trick fails for synchronize_rcu() (as well as for -synchronize_rcu_expedited()) in ``CONFIG_PREEMPT=y`` kernels. 
The +synchronize_rcu_expedited()) in ``CONFIG_PREEMPTION=y`` kernels. The reason is that an RCU read-side critical section might be preempted, which means that a subsequent synchronize_rcu() really does have to wait for something, as opposed to simply returning immediately. @@ -2038,7 +2038,7 @@ the following: 5 rcu_read_unlock(); 6 do_something_with(v, user_v); -If the compiler did make this transformation in a ``CONFIG_PREEMPT=n`` kernel +If the compiler did make this transformation in a ``CONFIG_PREEMPTION=n`` kernel build, and if get_user() did page fault, the result would be a quiescent state in the middle of an RCU read-side critical section. This misplaced quiescent state could result in line 4 being a use-after-free access, @@ -2320,7 +2320,7 @@ conjunction with the `-rt patchset `__. The real-time-latency response requirements are such that the traditional approach of disabling preemption across RCU read-side critical sections -is inappropriate. Kernels built with ``CONFIG_PREEMPT=y`` therefore use +is inappropriate. Kernels built with ``CONFIG_PREEMPTION=y`` therefore use an RCU implementation that allows RCU read-side critical sections to be preempted. This requirement made its presence known after users made it clear that an earlier `real-time @@ -2460,11 +2460,11 @@ not have this property, given that any point in the code outside of an RCU read-side critical section can be a quiescent state. Therefore, *RCU-sched* was created, which follows “classic” RCU in that an RCU-sched grace period waits for pre-existing interrupt and NMI -handlers. In kernels built with ``CONFIG_PREEMPT=n``, the RCU and +handlers. In kernels built with ``CONFIG_PREEMPTION=n``, the RCU and RCU-sched APIs have identical implementations, while kernels built with -``CONFIG_PREEMPT=y`` provide a separate implementation for each. +``CONFIG_PREEMPTION=y`` provide a separate implementation for each. -Note well that in ``CONFIG_PREEMPT=y`` kernels, +Note well that in ``CONFIG_PREEMPTION=y`` kernels, rcu_read_lock_sched() and rcu_read_unlock_sched() disable and re-enable preemption, respectively. This means that if there was a preemption attempt during the RCU-sched read-side critical section, @@ -2627,10 +2627,10 @@ userspace execution also delimit tasks-RCU read-side critical sections. The tasks-RCU API is quite compact, consisting only of call_rcu_tasks(), synchronize_rcu_tasks(), and -rcu_barrier_tasks(). In ``CONFIG_PREEMPT=n`` kernels, trampolines +rcu_barrier_tasks(). In ``CONFIG_PREEMPTION=n`` kernels, trampolines cannot be preempted, so these APIs map to call_rcu(), synchronize_rcu(), and rcu_barrier(), respectively. In -``CONFIG_PREEMPT=y`` kernels, trampolines can be preempted, and these +``CONFIG_PREEMPTION=y`` kernels, trampolines can be preempted, and these three APIs are therefore implemented by separate functions that check for voluntary context switches. diff --git a/Documentation/RCU/checklist.rst b/Documentation/RCU/checklist.rst index 2d1dc1deffc9..1030119294d0 100644 --- a/Documentation/RCU/checklist.rst +++ b/Documentation/RCU/checklist.rst @@ -212,7 +212,7 @@ over a rather long period of time, but improvements are always welcome! the rest of the system. 7. As of v4.20, a given kernel implements only one RCU flavor, - which is RCU-sched for PREEMPT=n and RCU-preempt for PREEMPT=y. + which is RCU-sched for PREEMPTION=n and RCU-preempt for PREEMPTION=y. 
If the updater uses call_rcu() or synchronize_rcu(), then the corresponding readers may use rcu_read_lock() and rcu_read_unlock(), rcu_read_lock_bh() and rcu_read_unlock_bh(), diff --git a/Documentation/RCU/rcubarrier.rst b/Documentation/RCU/rcubarrier.rst index f64f4413a47c..3b4a24877496 100644 --- a/Documentation/RCU/rcubarrier.rst +++ b/Documentation/RCU/rcubarrier.rst @@ -9,7 +9,7 @@ RCU (read-copy update) is a synchronization mechanism that can be thought of as a replacement for read-writer locking (among other things), but with very low-overhead readers that are immune to deadlock, priority inversion, and unbounded latency. RCU read-side critical sections are delimited -by rcu_read_lock() and rcu_read_unlock(), which, in non-CONFIG_PREEMPT +by rcu_read_lock() and rcu_read_unlock(), which, in non-CONFIG_PREEMPTION kernels, generate no code whatsoever. This means that RCU writers are unaware of the presence of concurrent @@ -329,10 +329,10 @@ Answer: This cannot happen. The reason is that on_each_cpu() has its last to smp_call_function() and further to smp_call_function_on_cpu(), causing this latter to spin until the cross-CPU invocation of rcu_barrier_func() has completed. This by itself would prevent - a grace period from completing on non-CONFIG_PREEMPT kernels, + a grace period from completing on non-CONFIG_PREEMPTION kernels, since each CPU must undergo a context switch (or other quiescent state) before the grace period can complete. However, this is - of no use in CONFIG_PREEMPT kernels. + of no use in CONFIG_PREEMPTION kernels. Therefore, on_each_cpu() disables preemption across its call to smp_call_function() and also across the local call to diff --git a/Documentation/RCU/stallwarn.rst b/Documentation/RCU/stallwarn.rst index c9ab6af4d3be..e97d1b4876ef 100644 --- a/Documentation/RCU/stallwarn.rst +++ b/Documentation/RCU/stallwarn.rst @@ -25,7 +25,7 @@ warnings: - A CPU looping with bottom halves disabled. -- For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel +- For !CONFIG_PREEMPTION kernels, a CPU looping anywhere in the kernel without invoking schedule(). If the looping in the kernel is really expected and desirable behavior, you might need to add some calls to cond_resched(). @@ -44,7 +44,7 @@ warnings: result in the ``rcu_.*kthread starved for`` console-log message, which will include additional debugging information. -- A CPU-bound real-time task in a CONFIG_PREEMPT kernel, which might +- A CPU-bound real-time task in a CONFIG_PREEMPTION kernel, which might happen to preempt a low-priority task in the middle of an RCU read-side critical section. This is especially damaging if that low-priority task is not permitted to run on any other CPU, diff --git a/Documentation/RCU/whatisRCU.rst b/Documentation/RCU/whatisRCU.rst index 1a4723f48bd9..17e95ab2a201 100644 --- a/Documentation/RCU/whatisRCU.rst +++ b/Documentation/RCU/whatisRCU.rst @@ -683,7 +683,7 @@ Quick Quiz #1: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ This section presents a "toy" RCU implementation that is based on "classic RCU". It is also short on performance (but only for updates) and -on features such as hotplug CPU and the ability to run in CONFIG_PREEMPT +on features such as hotplug CPU and the ability to run in CONFIG_PREEMPTION kernels. The definitions of rcu_dereference() and rcu_assign_pointer() are the same as those shown in the preceding section, so they are omitted. 
:: @@ -739,7 +739,7 @@ Quick Quiz #2: Quick Quiz #3: If it is illegal to block in an RCU read-side critical section, what the heck do you do in - PREEMPT_RT, where normal spinlocks can block??? + CONFIG_PREEMPT_RT, where normal spinlocks can block??? :ref:`Answers to Quick Quiz <8_whatisRCU>` @@ -1093,7 +1093,7 @@ Quick Quiz #2: overhead is **negative**. Answer: - Imagine a single-CPU system with a non-CONFIG_PREEMPT + Imagine a single-CPU system with a non-CONFIG_PREEMPTION kernel where a routing table is used by process-context code, but can be updated by irq-context code (for example, by an "ICMP REDIRECT" packet). The usual way of handling @@ -1120,10 +1120,10 @@ Answer: Quick Quiz #3: If it is illegal to block in an RCU read-side critical section, what the heck do you do in - PREEMPT_RT, where normal spinlocks can block??? + CONFIG_PREEMPT_RT, where normal spinlocks can block??? Answer: - Just as PREEMPT_RT permits preemption of spinlock + Just as CONFIG_PREEMPT_RT permits preemption of spinlock critical sections, it permits preemption of RCU read-side critical sections. It also permits spinlocks blocking while in RCU read-side critical From c2e13112e830c06825339cbadf0b3bc2bdb9a716 Mon Sep 17 00:00:00 2001 From: "Joel Fernandes (Google)" Date: Tue, 3 Nov 2020 09:26:03 -0500 Subject: [PATCH 057/130] rcu/segcblist: Add additional comments to explain smp_mb() One counter-intuitive property of RCU is the fact that full memory barriers are needed both before and after updates to the full (non-segmented) length. This patch therefore helps to assist the reader's intuition by adding appropriate comments. [ paulmck: Wordsmithing. ] Signed-off-by: Joel Fernandes (Google) Signed-off-by: Paul E. McKenney --- kernel/rcu/rcu_segcblist.c | 68 +++++++++++++++++++++++++++++++++++--- 1 file changed, 64 insertions(+), 4 deletions(-) diff --git a/kernel/rcu/rcu_segcblist.c b/kernel/rcu/rcu_segcblist.c index bb246d8c6ef1..3cff8003f707 100644 --- a/kernel/rcu/rcu_segcblist.c +++ b/kernel/rcu/rcu_segcblist.c @@ -94,17 +94,77 @@ static void rcu_segcblist_set_len(struct rcu_segcblist *rsclp, long v) * field to disagree with the actual number of callbacks on the structure. * This increase is fully ordered with respect to the callers accesses * both before and after. + * + * So why on earth is a memory barrier required both before and after + * the update to the ->len field??? + * + * The reason is that rcu_barrier() locklessly samples each CPU's ->len + * field, and if a given CPU's field is zero, avoids IPIing that CPU. + * This can of course race with both queuing and invoking of callbacks. + * Failing to correctly handle either of these races could result in + * rcu_barrier() failing to IPI a CPU that actually had callbacks queued + * which rcu_barrier() was obligated to wait on. And if rcu_barrier() + * failed to wait on such a callback, unloading certain kernel modules + * would result in calls to functions whose code was no longer present in + * the kernel, for but one example. + * + * Therefore, ->len transitions from 1->0 and 0->1 have to be carefully + * ordered with respect with both list modifications and the rcu_barrier(). + * + * The queuing case is CASE 1 and the invoking case is CASE 2. + * + * CASE 1: Suppose that CPU 0 has no callbacks queued, but invokes + * call_rcu() just as CPU 1 invokes rcu_barrier(). CPU 0's ->len field + * will transition from 0->1, which is one of the transitions that must + * be handled carefully. 
Without the full memory barriers after the ->len + * update and at the beginning of rcu_barrier(), the following could happen: + * + * CPU 0 CPU 1 + * + * call_rcu(). + * rcu_barrier() sees ->len as 0. + * set ->len = 1. + * rcu_barrier() does nothing. + * module is unloaded. + * callback invokes unloaded function! + * + * With the full barriers, any case where rcu_barrier() sees ->len as 0 will + * have unambiguously preceded the return from the racing call_rcu(), which + * means that this call_rcu() invocation is OK to not wait on. After all, + * you are supposed to make sure that any problematic call_rcu() invocations + * happen before the rcu_barrier(). + * + * + * CASE 2: Suppose that CPU 0 is invoking its last callback just as + * CPU 1 invokes rcu_barrier(). CPU 0's ->len field will transition from + * 1->0, which is one of the transitions that must be handled carefully. + * Without the full memory barriers before the ->len update and at the + * end of rcu_barrier(), the following could happen: + * + * CPU 0 CPU 1 + * + * start invoking last callback + * set ->len = 0 (reordered) + * rcu_barrier() sees ->len as 0 + * rcu_barrier() does nothing. + * module is unloaded + * callback executing after unloaded! + * + * With the full barriers, any case where rcu_barrier() sees ->len as 0 + * will be fully ordered after the completion of the callback function, + * so that the module unloading operation is completely safe. + * */ void rcu_segcblist_add_len(struct rcu_segcblist *rsclp, long v) { #ifdef CONFIG_RCU_NOCB_CPU - smp_mb__before_atomic(); /* Up to the caller! */ + smp_mb__before_atomic(); // Read header comment above. atomic_long_add(v, &rsclp->len); - smp_mb__after_atomic(); /* Up to the caller! */ + smp_mb__after_atomic(); // Read header comment above. #else - smp_mb(); /* Up to the caller! */ + smp_mb(); // Read header comment above. WRITE_ONCE(rsclp->len, rsclp->len + v); - smp_mb(); /* Up to the caller! */ + smp_mb(); // Read header comment above. #endif } From ae5c2341ed3987bd434ed495bd4f3d8b2bc3e623 Mon Sep 17 00:00:00 2001 From: "Joel Fernandes (Google)" Date: Wed, 23 Sep 2020 11:22:09 -0400 Subject: [PATCH 058/130] rcu/segcblist: Add counters to segcblist datastructure Add counting of segment lengths of the segmented callback list. This will be useful for a number of things such as knowing how big the ready-to-execute segment has gotten. The immediate benefit is the ability to trace how the callbacks in the segmented callback list change. This patch also removes hacks related to using donecbs's ->len field as a temporary variable to save the segmented callback list's length. This cannot be done anymore and is not needed. Also fix SRCU: The negative counting of the unsegmented list cannot be used to adjust the segmented one. To fix this, sample the unsegmented length in advance, and use it after CB execution to adjust the segmented list's length. Reviewed-by: Frederic Weisbecker Signed-off-by: Joel Fernandes (Google) Signed-off-by: Paul E.
McKenney --- include/linux/rcu_segcblist.h | 1 + kernel/rcu/rcu_segcblist.c | 120 ++++++++++++++++++++++------------ kernel/rcu/rcu_segcblist.h | 2 - kernel/rcu/srcutree.c | 5 +- 4 files changed, 82 insertions(+), 46 deletions(-) diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h index b36afe7b22c9..6c01f09a6456 100644 --- a/include/linux/rcu_segcblist.h +++ b/include/linux/rcu_segcblist.h @@ -72,6 +72,7 @@ struct rcu_segcblist { #else long len; #endif + long seglen[RCU_CBLIST_NSEGS]; u8 enabled; u8 offloaded; }; diff --git a/kernel/rcu/rcu_segcblist.c b/kernel/rcu/rcu_segcblist.c index 3cff8003f707..777780447887 100644 --- a/kernel/rcu/rcu_segcblist.c +++ b/kernel/rcu/rcu_segcblist.c @@ -7,10 +7,10 @@ * Authors: Paul E. McKenney */ -#include -#include +#include #include -#include +#include +#include #include "rcu_segcblist.h" @@ -88,6 +88,46 @@ static void rcu_segcblist_set_len(struct rcu_segcblist *rsclp, long v) #endif } +/* Get the length of a segment of the rcu_segcblist structure. */ +static long rcu_segcblist_get_seglen(struct rcu_segcblist *rsclp, int seg) +{ + return READ_ONCE(rsclp->seglen[seg]); +} + +/* Set the length of a segment of the rcu_segcblist structure. */ +static void rcu_segcblist_set_seglen(struct rcu_segcblist *rsclp, int seg, long v) +{ + WRITE_ONCE(rsclp->seglen[seg], v); +} + +/* Increase the numeric length of a segment by a specified amount. */ +static void rcu_segcblist_add_seglen(struct rcu_segcblist *rsclp, int seg, long v) +{ + WRITE_ONCE(rsclp->seglen[seg], rsclp->seglen[seg] + v); +} + +/* Move from's segment length to to's segment. */ +static void rcu_segcblist_move_seglen(struct rcu_segcblist *rsclp, int from, int to) +{ + long len; + + if (from == to) + return; + + len = rcu_segcblist_get_seglen(rsclp, from); + if (!len) + return; + + rcu_segcblist_add_seglen(rsclp, to, len); + rcu_segcblist_set_seglen(rsclp, from, 0); +} + +/* Increment segment's length. */ +static void rcu_segcblist_inc_seglen(struct rcu_segcblist *rsclp, int seg) +{ + rcu_segcblist_add_seglen(rsclp, seg, 1); +} + /* * Increase the numeric length of an rcu_segcblist structure by the * specified amount, which can be negative. This can cause the ->len @@ -179,26 +219,6 @@ void rcu_segcblist_inc_len(struct rcu_segcblist *rsclp) rcu_segcblist_add_len(rsclp, 1); } -/* - * Exchange the numeric length of the specified rcu_segcblist structure - * with the specified value. This can cause the ->len field to disagree - * with the actual number of callbacks on the structure. This exchange is - * fully ordered with respect to the callers accesses both before and after. - */ -static long rcu_segcblist_xchg_len(struct rcu_segcblist *rsclp, long v) -{ -#ifdef CONFIG_RCU_NOCB_CPU - return atomic_long_xchg(&rsclp->len, v); -#else - long ret = rsclp->len; - - smp_mb(); /* Up to the caller! */ - WRITE_ONCE(rsclp->len, v); - smp_mb(); /* Up to the caller! */ - return ret; -#endif -} - /* * Initialize an rcu_segcblist structure. 
*/ @@ -209,8 +229,10 @@ void rcu_segcblist_init(struct rcu_segcblist *rsclp) BUILD_BUG_ON(RCU_NEXT_TAIL + 1 != ARRAY_SIZE(rsclp->gp_seq)); BUILD_BUG_ON(ARRAY_SIZE(rsclp->tails) != ARRAY_SIZE(rsclp->gp_seq)); rsclp->head = NULL; - for (i = 0; i < RCU_CBLIST_NSEGS; i++) + for (i = 0; i < RCU_CBLIST_NSEGS; i++) { rsclp->tails[i] = &rsclp->head; + rcu_segcblist_set_seglen(rsclp, i, 0); + } rcu_segcblist_set_len(rsclp, 0); rsclp->enabled = 1; } @@ -306,6 +328,7 @@ void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp, { rcu_segcblist_inc_len(rsclp); smp_mb(); /* Ensure counts are updated before callback is enqueued. */ + rcu_segcblist_inc_seglen(rsclp, RCU_NEXT_TAIL); rhp->next = NULL; WRITE_ONCE(*rsclp->tails[RCU_NEXT_TAIL], rhp); WRITE_ONCE(rsclp->tails[RCU_NEXT_TAIL], &rhp->next); @@ -334,27 +357,13 @@ bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp, for (i = RCU_NEXT_TAIL; i > RCU_DONE_TAIL; i--) if (rsclp->tails[i] != rsclp->tails[i - 1]) break; + rcu_segcblist_inc_seglen(rsclp, i); WRITE_ONCE(*rsclp->tails[i], rhp); for (; i <= RCU_NEXT_TAIL; i++) WRITE_ONCE(rsclp->tails[i], &rhp->next); return true; } -/* - * Extract only the counts from the specified rcu_segcblist structure, - * and place them in the specified rcu_cblist structure. This function - * supports both callback orphaning and invocation, hence the separation - * of counts and callbacks. (Callbacks ready for invocation must be - * orphaned and adopted separately from pending callbacks, but counts - * apply to all callbacks. Locking must be used to make sure that - * both orphaned-callbacks lists are consistent.) - */ -void rcu_segcblist_extract_count(struct rcu_segcblist *rsclp, - struct rcu_cblist *rclp) -{ - rclp->len = rcu_segcblist_xchg_len(rsclp, 0); -} - /* * Extract only those callbacks ready to be invoked from the specified * rcu_segcblist structure and place them in the specified rcu_cblist @@ -367,6 +376,7 @@ void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp, if (!rcu_segcblist_ready_cbs(rsclp)) return; /* Nothing to do. */ + rclp->len = rcu_segcblist_get_seglen(rsclp, RCU_DONE_TAIL); *rclp->tail = rsclp->head; WRITE_ONCE(rsclp->head, *rsclp->tails[RCU_DONE_TAIL]); WRITE_ONCE(*rsclp->tails[RCU_DONE_TAIL], NULL); @@ -374,6 +384,7 @@ void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp, for (i = RCU_CBLIST_NSEGS - 1; i >= RCU_DONE_TAIL; i--) if (rsclp->tails[i] == rsclp->tails[RCU_DONE_TAIL]) WRITE_ONCE(rsclp->tails[i], &rsclp->head); + rcu_segcblist_set_seglen(rsclp, RCU_DONE_TAIL, 0); } /* @@ -390,11 +401,15 @@ void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp, if (!rcu_segcblist_pend_cbs(rsclp)) return; /* Nothing to do. */ + rclp->len = 0; *rclp->tail = *rsclp->tails[RCU_DONE_TAIL]; rclp->tail = rsclp->tails[RCU_NEXT_TAIL]; WRITE_ONCE(*rsclp->tails[RCU_DONE_TAIL], NULL); - for (i = RCU_DONE_TAIL + 1; i < RCU_CBLIST_NSEGS; i++) + for (i = RCU_DONE_TAIL + 1; i < RCU_CBLIST_NSEGS; i++) { + rclp->len += rcu_segcblist_get_seglen(rsclp, i); WRITE_ONCE(rsclp->tails[i], rsclp->tails[RCU_DONE_TAIL]); + rcu_segcblist_set_seglen(rsclp, i, 0); + } } /* @@ -405,7 +420,6 @@ void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp, struct rcu_cblist *rclp) { rcu_segcblist_add_len(rsclp, rclp->len); - rclp->len = 0; } /* @@ -419,6 +433,7 @@ void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp, if (!rclp->head) return; /* No callbacks to move. 
*/ + rcu_segcblist_add_seglen(rsclp, RCU_DONE_TAIL, rclp->len); *rclp->tail = rsclp->head; WRITE_ONCE(rsclp->head, rclp->head); for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++) @@ -439,6 +454,8 @@ void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp, { if (!rclp->head) return; /* Nothing to do. */ + + rcu_segcblist_add_seglen(rsclp, RCU_NEXT_TAIL, rclp->len); WRITE_ONCE(*rsclp->tails[RCU_NEXT_TAIL], rclp->head); WRITE_ONCE(rsclp->tails[RCU_NEXT_TAIL], rclp->tail); } @@ -463,6 +480,7 @@ void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq) if (ULONG_CMP_LT(seq, rsclp->gp_seq[i])) break; WRITE_ONCE(rsclp->tails[RCU_DONE_TAIL], rsclp->tails[i]); + rcu_segcblist_move_seglen(rsclp, i, RCU_DONE_TAIL); } /* If no callbacks moved, nothing more need be done. */ @@ -483,6 +501,7 @@ void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq) if (rsclp->tails[j] == rsclp->tails[RCU_NEXT_TAIL]) break; /* No more callbacks. */ WRITE_ONCE(rsclp->tails[j], rsclp->tails[i]); + rcu_segcblist_move_seglen(rsclp, i, j); rsclp->gp_seq[j] = rsclp->gp_seq[i]; } } @@ -504,7 +523,7 @@ void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq) */ bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq) { - int i; + int i, j; WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp)); if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL)) @@ -547,6 +566,10 @@ bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq) if (rcu_segcblist_restempty(rsclp, i) || ++i >= RCU_NEXT_TAIL) return false; + /* Accounting: everything below i is about to get merged into i. */ + for (j = i + 1; j <= RCU_NEXT_TAIL; j++) + rcu_segcblist_move_seglen(rsclp, j, i); + /* * Merge all later callbacks, including newly arrived callbacks, * into the segment located by the for-loop above. Assign "seq" @@ -574,13 +597,24 @@ void rcu_segcblist_merge(struct rcu_segcblist *dst_rsclp, struct rcu_cblist donecbs; struct rcu_cblist pendcbs; + lockdep_assert_cpus_held(); + rcu_cblist_init(&donecbs); rcu_cblist_init(&pendcbs); - rcu_segcblist_extract_count(src_rsclp, &donecbs); + rcu_segcblist_extract_done_cbs(src_rsclp, &donecbs); rcu_segcblist_extract_pend_cbs(src_rsclp, &pendcbs); + + /* + * No need smp_mb() before setting length to 0, because CPU hotplug + * lock excludes rcu_barrier. 
+ */ + rcu_segcblist_set_len(src_rsclp, 0); + rcu_segcblist_insert_count(dst_rsclp, &donecbs); + rcu_segcblist_insert_count(dst_rsclp, &pendcbs); rcu_segcblist_insert_done_cbs(dst_rsclp, &donecbs); rcu_segcblist_insert_pend_cbs(dst_rsclp, &pendcbs); + rcu_segcblist_init(src_rsclp); } diff --git a/kernel/rcu/rcu_segcblist.h b/kernel/rcu/rcu_segcblist.h index 1d2d61406463..cd35c9faaf51 100644 --- a/kernel/rcu/rcu_segcblist.h +++ b/kernel/rcu/rcu_segcblist.h @@ -89,8 +89,6 @@ void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp, struct rcu_head *rhp); bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp, struct rcu_head *rhp); -void rcu_segcblist_extract_count(struct rcu_segcblist *rsclp, - struct rcu_cblist *rclp); void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp, struct rcu_cblist *rclp); void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp, diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index 0f23d20d485a..79b7081143a7 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -1160,6 +1160,7 @@ static void srcu_advance_state(struct srcu_struct *ssp) */ static void srcu_invoke_callbacks(struct work_struct *work) { + long len; bool more; struct rcu_cblist ready_cbs; struct rcu_head *rhp; @@ -1182,6 +1183,7 @@ static void srcu_invoke_callbacks(struct work_struct *work) /* We are on the job! Extract and invoke ready callbacks. */ sdp->srcu_cblist_invoking = true; rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs); + len = ready_cbs.len; spin_unlock_irq_rcu_node(sdp); rhp = rcu_cblist_dequeue(&ready_cbs); for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) { @@ -1190,13 +1192,14 @@ static void srcu_invoke_callbacks(struct work_struct *work) rhp->func(rhp); local_bh_enable(); } + WARN_ON_ONCE(ready_cbs.len); /* * Update counts, accelerate new callbacks, and if needed, * schedule another round of callback invocation. */ spin_lock_irq_rcu_node(sdp); - rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs); + rcu_segcblist_add_len(&sdp->srcu_cblist, -len); (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, rcu_seq_snap(&ssp->srcu_gp_seq)); sdp->srcu_cblist_invoking = false; From 68804cf1c905ce227e4e1d0bc252c216811c59fd Mon Sep 17 00:00:00 2001 From: "Joel Fernandes (Google)" Date: Wed, 14 Oct 2020 18:21:53 -0400 Subject: [PATCH 059/130] rcu/tree: segcblist: Remove redundant smp_mb()s The full memory barriers in rcu_segcblist_enqueue() and in rcu_do_batch() are not needed because rcu_segcblist_add_len(), and thus also rcu_segcblist_inc_len(), already includes a memory barrier *before* and *after* the length of the list is updated. This commit therefore removes these redundant smp_mb() invocations. Reviewed-by: Frederic Weisbecker Signed-off-by: Joel Fernandes (Google) Signed-off-by: Paul E. McKenney --- kernel/rcu/rcu_segcblist.c | 1 - kernel/rcu/tree.c | 1 - 2 files changed, 2 deletions(-) diff --git a/kernel/rcu/rcu_segcblist.c b/kernel/rcu/rcu_segcblist.c index 777780447887..1e80a0a9036a 100644 --- a/kernel/rcu/rcu_segcblist.c +++ b/kernel/rcu/rcu_segcblist.c @@ -327,7 +327,6 @@ void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp, struct rcu_head *rhp) { rcu_segcblist_inc_len(rsclp); - smp_mb(); /* Ensure counts are updated before callback is enqueued. 
*/ rcu_segcblist_inc_seglen(rsclp, RCU_NEXT_TAIL); rhp->next = NULL; WRITE_ONCE(*rsclp->tails[RCU_NEXT_TAIL], rhp); diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index cc6f379c7ddf..b0fb654cba80 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2523,7 +2523,6 @@ static void rcu_do_batch(struct rcu_data *rdp) /* Update counts and requeue any remaining callbacks. */ rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl); - smp_mb(); /* List handling before counting for rcu_barrier(). */ rcu_segcblist_add_len(&rdp->cblist, -count); /* Reinstate batch limit if we have worked down the excess. */ From 3afe7fa535491ecd0382c3968dc2349602bff8a2 Mon Sep 17 00:00:00 2001 From: "Joel Fernandes (Google)" Date: Sat, 14 Nov 2020 14:31:32 -0500 Subject: [PATCH 060/130] rcu/trace: Add tracing for how segcb list changes This commit adds tracing to track how the segcb list changes before/after acceleration, during queuing and during dequeuing. This tracing helped discover an optimization that avoided needless GP requests when no callbacks were accelerated. The tracing overhead is minimal as each segment's length is now stored in the respective segment. Reviewed-by: Frederic Weisbecker Reviewed-by: Neeraj Upadhyay Signed-off-by: Joel Fernandes (Google) Signed-off-by: Paul E. McKenney --- include/trace/events/rcu.h | 26 ++++++++++++++++++++++++++ kernel/rcu/tree.c | 9 +++++++++ 2 files changed, 35 insertions(+) diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index 155b5cb43cfd..5fc29400e1a2 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h @@ -505,6 +505,32 @@ TRACE_EVENT_RCU(rcu_callback, __entry->qlen) ); +TRACE_EVENT_RCU(rcu_segcb_stats, + + TP_PROTO(struct rcu_segcblist *rs, const char *ctx), + + TP_ARGS(rs, ctx), + + TP_STRUCT__entry( + __field(const char *, ctx) + __array(unsigned long, gp_seq, RCU_CBLIST_NSEGS) + __array(long, seglen, RCU_CBLIST_NSEGS) + ), + + TP_fast_assign( + __entry->ctx = ctx; + memcpy(__entry->seglen, rs->seglen, RCU_CBLIST_NSEGS * sizeof(long)); + memcpy(__entry->gp_seq, rs->gp_seq, RCU_CBLIST_NSEGS * sizeof(unsigned long)); + + ), + + TP_printk("%s seglen: (DONE=%ld, WAIT=%ld, NEXT_READY=%ld, NEXT=%ld) " + "gp_seq: (DONE=%lu, WAIT=%lu, NEXT_READY=%lu, NEXT=%lu)", __entry->ctx, + __entry->seglen[0], __entry->seglen[1], __entry->seglen[2], __entry->seglen[3], + __entry->gp_seq[0], __entry->gp_seq[1], __entry->gp_seq[2], __entry->gp_seq[3]) + +); + /* * Tracepoint for the registration of a single RCU callback of the special * kvfree() form. The first argument is the RCU type, the second argument diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index b0fb654cba80..6bf269c91393 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1495,6 +1495,8 @@ static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp) if (!rcu_segcblist_pend_cbs(&rdp->cblist)) return false; + trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc")); + /* * Callbacks are often registered with incomplete grace-period * information. 
Something about the fact that getting exact @@ -1515,6 +1517,8 @@ static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp) else trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB")); + trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc")); + return ret; } @@ -2471,11 +2475,14 @@ static void rcu_do_batch(struct rcu_data *rdp) rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl); if (offloaded) rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); + + trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued")); rcu_nocb_unlock_irqrestore(rdp, flags); /* Invoke callbacks. */ tick_dep_set_task(current, TICK_DEP_BIT_RCU); rhp = rcu_cblist_dequeue(&rcl); + for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) { rcu_callback_t f; @@ -2987,6 +2994,8 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func) trace_rcu_callback(rcu_state.name, head, rcu_segcblist_n_cbs(&rdp->cblist)); + trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued")); + /* Go handle any RCU core processing required. */ if (unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) { __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */ From b4e6039e8af8c20dfbbdfcaebfcbd7c9d9ffe713 Mon Sep 17 00:00:00 2001 From: "Joel Fernandes (Google)" Date: Wed, 18 Nov 2020 11:15:41 -0500 Subject: [PATCH 061/130] rcu/segcblist: Add debug checks for segment lengths This commit adds debug checks near the end of rcu_do_batch() that emit warnings if an empty rcu_segcblist structure has non-zero segment counts, or, conversely, if a non-empty structure has all-zero segment counts. Signed-off-by: Joel Fernandes (Google) [ paulmck: Fix queue/segment-length checks. ] Signed-off-by: Paul E. McKenney --- kernel/rcu/rcu_segcblist.c | 12 ++++++++++++ kernel/rcu/rcu_segcblist.h | 3 +++ kernel/rcu/tree.c | 8 ++++++-- 3 files changed, 21 insertions(+), 2 deletions(-) diff --git a/kernel/rcu/rcu_segcblist.c b/kernel/rcu/rcu_segcblist.c index 1e80a0a9036a..89e0dff890bf 100644 --- a/kernel/rcu/rcu_segcblist.c +++ b/kernel/rcu/rcu_segcblist.c @@ -94,6 +94,18 @@ static long rcu_segcblist_get_seglen(struct rcu_segcblist *rsclp, int seg) return READ_ONCE(rsclp->seglen[seg]); } +/* Return number of callbacks in segmented callback list by summing seglen. */ +long rcu_segcblist_n_segment_cbs(struct rcu_segcblist *rsclp) +{ + long len = 0; + int i; + + for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++) + len += rcu_segcblist_get_seglen(rsclp, i); + + return len; +} + /* Set the length of a segment of the rcu_segcblist structure. */ static void rcu_segcblist_set_seglen(struct rcu_segcblist *rsclp, int seg, long v) { diff --git a/kernel/rcu/rcu_segcblist.h b/kernel/rcu/rcu_segcblist.h index cd35c9faaf51..18e101de8747 100644 --- a/kernel/rcu/rcu_segcblist.h +++ b/kernel/rcu/rcu_segcblist.h @@ -15,6 +15,9 @@ static inline long rcu_cblist_n_cbs(struct rcu_cblist *rclp) return READ_ONCE(rclp->len); } +/* Return number of callbacks in segmented callback list by summing seglen. 
*/ +long rcu_segcblist_n_segment_cbs(struct rcu_segcblist *rsclp); + void rcu_cblist_init(struct rcu_cblist *rclp); void rcu_cblist_enqueue(struct rcu_cblist *rclp, struct rcu_head *rhp); void rcu_cblist_flush_enqueue(struct rcu_cblist *drclp, diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 6bf269c91393..8086c0467c15 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2434,6 +2434,7 @@ int rcutree_dead_cpu(unsigned int cpu) static void rcu_do_batch(struct rcu_data *rdp) { int div; + bool __maybe_unused empty; unsigned long flags; const bool offloaded = rcu_segcblist_is_offloaded(&rdp->cblist); struct rcu_head *rhp; @@ -2548,9 +2549,12 @@ static void rcu_do_batch(struct rcu_data *rdp) * The following usually indicates a double call_rcu(). To track * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. */ - WARN_ON_ONCE(count == 0 && !rcu_segcblist_empty(&rdp->cblist)); + empty = rcu_segcblist_empty(&rdp->cblist); + WARN_ON_ONCE(count == 0 && !empty); WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) && - count != 0 && rcu_segcblist_empty(&rdp->cblist)); + count != 0 && empty); + WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0); + WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0); rcu_nocb_unlock_irqrestore(rdp, flags); From 65e560327fe68153a9ad7452d5fd3171a1927d33 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 13 Nov 2020 13:13:16 +0100 Subject: [PATCH 062/130] rcu/nocb: Turn enabled/offload states into a common flag This commit gathers the rcu_segcblist ->enabled and ->offloaded property field into a single ->flags bitmask to avoid further proliferation of individual u8 fields in the structure. This change prepares for the state formerly known as ->offloaded state to be modified at runtime. Cc: Josh Triplett Cc: Steven Rostedt Cc: Mathieu Desnoyers Cc: Lai Jiangshan Cc: Joel Fernandes Cc: Neeraj Upadhyay Cc: Thomas Gleixner Inspired-by: Paul E. McKenney Tested-by: Boqun Feng Signed-off-by: Frederic Weisbecker Signed-off-by: Paul E. 
McKenney --- include/linux/rcu_segcblist.h | 6 ++++-- kernel/rcu/rcu_segcblist.c | 6 +++--- kernel/rcu/rcu_segcblist.h | 23 +++++++++++++++++++++-- 3 files changed, 28 insertions(+), 7 deletions(-) diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h index 6c01f09a6456..4714b0263c76 100644 --- a/include/linux/rcu_segcblist.h +++ b/include/linux/rcu_segcblist.h @@ -63,6 +63,9 @@ struct rcu_cblist { #define RCU_NEXT_TAIL 3 #define RCU_CBLIST_NSEGS 4 +#define SEGCBLIST_ENABLED BIT(0) +#define SEGCBLIST_OFFLOADED BIT(1) + struct rcu_segcblist { struct rcu_head *head; struct rcu_head **tails[RCU_CBLIST_NSEGS]; @@ -73,8 +76,7 @@ struct rcu_segcblist { long len; #endif long seglen[RCU_CBLIST_NSEGS]; - u8 enabled; - u8 offloaded; + u8 flags; }; #define RCU_SEGCBLIST_INITIALIZER(n) \ diff --git a/kernel/rcu/rcu_segcblist.c b/kernel/rcu/rcu_segcblist.c index 89e0dff890bf..934945d72ca5 100644 --- a/kernel/rcu/rcu_segcblist.c +++ b/kernel/rcu/rcu_segcblist.c @@ -246,7 +246,7 @@ void rcu_segcblist_init(struct rcu_segcblist *rsclp) rcu_segcblist_set_seglen(rsclp, i, 0); } rcu_segcblist_set_len(rsclp, 0); - rsclp->enabled = 1; + rcu_segcblist_set_flags(rsclp, SEGCBLIST_ENABLED); } /* @@ -257,7 +257,7 @@ void rcu_segcblist_disable(struct rcu_segcblist *rsclp) { WARN_ON_ONCE(!rcu_segcblist_empty(rsclp)); WARN_ON_ONCE(rcu_segcblist_n_cbs(rsclp)); - rsclp->enabled = 0; + rcu_segcblist_clear_flags(rsclp, SEGCBLIST_ENABLED); } /* @@ -266,7 +266,7 @@ void rcu_segcblist_disable(struct rcu_segcblist *rsclp) */ void rcu_segcblist_offload(struct rcu_segcblist *rsclp) { - rsclp->offloaded = 1; + rcu_segcblist_set_flags(rsclp, SEGCBLIST_OFFLOADED); } /* diff --git a/kernel/rcu/rcu_segcblist.h b/kernel/rcu/rcu_segcblist.h index 18e101de8747..ff372db09f8b 100644 --- a/kernel/rcu/rcu_segcblist.h +++ b/kernel/rcu/rcu_segcblist.h @@ -53,19 +53,38 @@ static inline long rcu_segcblist_n_cbs(struct rcu_segcblist *rsclp) #endif } +static inline void rcu_segcblist_set_flags(struct rcu_segcblist *rsclp, + int flags) +{ + rsclp->flags |= flags; +} + +static inline void rcu_segcblist_clear_flags(struct rcu_segcblist *rsclp, + int flags) +{ + rsclp->flags &= ~flags; +} + +static inline bool rcu_segcblist_test_flags(struct rcu_segcblist *rsclp, + int flags) +{ + return READ_ONCE(rsclp->flags) & flags; +} + /* * Is the specified rcu_segcblist enabled, for example, not corresponding * to an offline CPU? */ static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp) { - return rsclp->enabled; + return rcu_segcblist_test_flags(rsclp, SEGCBLIST_ENABLED); } /* Is the specified rcu_segcblist offloaded? */ static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp) { - return IS_ENABLED(CONFIG_RCU_NOCB_CPU) && rsclp->offloaded; + return IS_ENABLED(CONFIG_RCU_NOCB_CPU) && + rcu_segcblist_test_flags(rsclp, SEGCBLIST_OFFLOADED); } /* From 8d346d438f93b5344e99d429727ec9c2f392d4ec Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 13 Nov 2020 13:13:17 +0100 Subject: [PATCH 063/130] rcu/nocb: Provide basic callback offloading state machine bits Offloading and de-offloading RCU callback processing must be done carefully. There must never be a time at which callback processing is disabled because the task driving the offloading or de-offloading might be preempted or otherwise stalled at that point in time, which would result in OOM due to callbacks piling up indefinitely.
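A compact way to see how the individual flag bits are meant to combine is the following user-space sketch. It is illustrative only: the bit values mirror the SEGCBLIST_* definitions and the state-machine diagrams added later in this patch, but segcblist_state_name() and the main() driver are invented for the example and exist nowhere in the kernel.

#include <stdio.h>

/* Flag bits mirroring include/linux/rcu_segcblist.h after this patch. */
#define SEGCBLIST_ENABLED      (1 << 0)
#define SEGCBLIST_SOFTIRQ_ONLY (1 << 1)
#define SEGCBLIST_KTHREAD_CB   (1 << 2)
#define SEGCBLIST_KTHREAD_GP   (1 << 3)
#define SEGCBLIST_OFFLOADED    (1 << 4)

/* Hypothetical helper: name the offloading state encoded in @flags. */
static const char *segcblist_state_name(unsigned int flags)
{
        unsigned int kthreads = SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP;

        if (flags & SEGCBLIST_SOFTIRQ_ONLY)
                return "de-offloaded: rcu_core() only, no nocb_lock";
        if ((flags & kthreads) == kthreads)
                return (flags & SEGCBLIST_OFFLOADED) ?
                       "offloaded: CB/GP kthreads own the callbacks" :
                       "de-offloading: kthreads have not yet parked";
        return (flags & SEGCBLIST_OFFLOADED) ?
               "offloading: kthreads still acknowledging" :
               "transitional";
}

int main(void)
{
        unsigned int nocb_cpu = SEGCBLIST_ENABLED | SEGCBLIST_OFFLOADED |
                                SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP;
        unsigned int normal_cpu = SEGCBLIST_ENABLED | SEGCBLIST_SOFTIRQ_ONLY;

        printf("boot-time nocb CPU:   %s\n", segcblist_state_name(nocb_cpu));
        printf("boot-time normal CPU: %s\n", segcblist_state_name(normal_cpu));
        return 0;
}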
This implies that there will be times during which a given CPU's callbacks might be concurrently invoked by both that CPU's RCU_SOFTIRQ handler (or, equivalently, that CPU's rcuc kthread) and by that CPU's rcuo kthread. This situation could fatally confuse both rcu_barrier() and the CPU-hotplug offlining process, so these must be excluded during any concurrent-callback-invocation period. In addition, during times of concurrent callback invocation, changes to ->cblist must be protected both as needed for RCU_SOFTIRQ and as needed for the rcuo kthread. This commit therefore defines and documents the states for a state machine that coordinates offloading and deoffloading. Cc: Josh Triplett Cc: Steven Rostedt Cc: Mathieu Desnoyers Cc: Lai Jiangshan Cc: Joel Fernandes Cc: Neeraj Upadhyay Cc: Thomas Gleixner Inspired-by: Paul E. McKenney Tested-by: Boqun Feng Signed-off-by: Frederic Weisbecker Signed-off-by: Paul E. McKenney --- include/linux/rcu_segcblist.h | 115 +++++++++++++++++++++++++++++++++- kernel/rcu/rcu_segcblist.c | 1 + kernel/rcu/rcu_segcblist.h | 12 +++- kernel/rcu/tree.c | 3 + 4 files changed, 128 insertions(+), 3 deletions(-) diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h index 4714b0263c76..8afe886e85f1 100644 --- a/include/linux/rcu_segcblist.h +++ b/include/linux/rcu_segcblist.h @@ -63,8 +63,121 @@ struct rcu_cblist { #define RCU_NEXT_TAIL 3 #define RCU_CBLIST_NSEGS 4 + +/* + * ==NOCB Offloading state machine== + * + * + * ---------------------------------------------------------------------------- + * | SEGCBLIST_SOFTIRQ_ONLY | + * | | + * | Callbacks processed by rcu_core() from softirqs or local | + * | rcuc kthread, without holding nocb_lock. | + * ---------------------------------------------------------------------------- + * | + * v + * ---------------------------------------------------------------------------- + * | SEGCBLIST_OFFLOADED | + * | | + * | Callbacks processed by rcu_core() from softirqs or local | + * | rcuc kthread, while holding nocb_lock. Waking up CB and GP kthreads, | + * | allowing nocb_timer to be armed. | + * ---------------------------------------------------------------------------- + * | + * v + * ----------------------------------- + * | | + * v v + * --------------------------------------- ----------------------------------| + * | SEGCBLIST_OFFLOADED | | | SEGCBLIST_OFFLOADED | | + * | SEGCBLIST_KTHREAD_CB | | SEGCBLIST_KTHREAD_GP | + * | | | | + * | | | | + * | CB kthread woke up and | | GP kthread woke up and | + * | acknowledged SEGCBLIST_OFFLOADED. | | acknowledged SEGCBLIST_OFFLOADED| + * | Processes callbacks concurrently | | | + * | with rcu_core(), holding | | | + * | nocb_lock. | | | + * --------------------------------------- ----------------------------------- + * | | + * ----------------------------------- + * | + * v + * |--------------------------------------------------------------------------| + * | SEGCBLIST_OFFLOADED | | + * | SEGCBLIST_KTHREAD_CB | | + * | SEGCBLIST_KTHREAD_GP | + * | | + * | Kthreads handle callbacks holding nocb_lock, local rcu_core() stops | + * | handling callbacks. 
| + * ---------------------------------------------------------------------------- + */ + + + +/* + * ==NOCB De-Offloading state machine== + * + * + * |--------------------------------------------------------------------------| + * | SEGCBLIST_OFFLOADED | | + * | SEGCBLIST_KTHREAD_CB | | + * | SEGCBLIST_KTHREAD_GP | + * | | + * | CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core() | + * | ignores callbacks. | + * ---------------------------------------------------------------------------- + * | + * v + * |--------------------------------------------------------------------------| + * | SEGCBLIST_KTHREAD_CB | | + * | SEGCBLIST_KTHREAD_GP | + * | | + * | CB/GP kthreads and local rcu_core() handle callbacks concurrently | + * | holding nocb_lock. Wake up CB and GP kthreads if necessary. | + * ---------------------------------------------------------------------------- + * | + * v + * ----------------------------------- + * | | + * v v + * ---------------------------------------------------------------------------| + * | | + * | SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP | + * | | | + * | GP kthread woke up and | CB kthread woke up and | + * | acknowledged the fact that | acknowledged the fact that | + * | SEGCBLIST_OFFLOADED got cleared. | SEGCBLIST_OFFLOADED got cleared. | + * | | The CB kthread goes to sleep | + * | The callbacks from the target CPU | until it ever gets re-offloaded. | + * | will be ignored from the GP kthread | | + * | loop. | | + * ---------------------------------------------------------------------------- + * | | + * ----------------------------------- + * | + * v + * ---------------------------------------------------------------------------- + * | 0 | + * | | + * | Callbacks processed by rcu_core() from softirqs or local | + * | rcuc kthread, while holding nocb_lock. Forbid nocb_timer to be armed. | + * | Flush pending nocb_timer. Flush nocb bypass callbacks. | + * ---------------------------------------------------------------------------- + * | + * v + * ---------------------------------------------------------------------------- + * | SEGCBLIST_SOFTIRQ_ONLY | + * | | + * | Callbacks processed by rcu_core() from softirqs or local | + * | rcuc kthread, without holding nocb_lock. | + * ---------------------------------------------------------------------------- + */ #define SEGCBLIST_ENABLED BIT(0) -#define SEGCBLIST_OFFLOADED BIT(1) +#define SEGCBLIST_SOFTIRQ_ONLY BIT(1) +#define SEGCBLIST_KTHREAD_CB BIT(2) +#define SEGCBLIST_KTHREAD_GP BIT(3) +#define SEGCBLIST_OFFLOADED BIT(4) struct rcu_segcblist { struct rcu_head *head; diff --git a/kernel/rcu/rcu_segcblist.c b/kernel/rcu/rcu_segcblist.c index 934945d72ca5..7fc6362625b2 100644 --- a/kernel/rcu/rcu_segcblist.c +++ b/kernel/rcu/rcu_segcblist.c @@ -266,6 +266,7 @@ void rcu_segcblist_disable(struct rcu_segcblist *rsclp) */ void rcu_segcblist_offload(struct rcu_segcblist *rsclp) { + rcu_segcblist_clear_flags(rsclp, SEGCBLIST_SOFTIRQ_ONLY); rcu_segcblist_set_flags(rsclp, SEGCBLIST_OFFLOADED); } diff --git a/kernel/rcu/rcu_segcblist.h b/kernel/rcu/rcu_segcblist.h index ff372db09f8b..e05952ab9b87 100644 --- a/kernel/rcu/rcu_segcblist.h +++ b/kernel/rcu/rcu_segcblist.h @@ -83,8 +83,16 @@ static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp) /* Is the specified rcu_segcblist offloaded? 
*/ static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp) { - return IS_ENABLED(CONFIG_RCU_NOCB_CPU) && - rcu_segcblist_test_flags(rsclp, SEGCBLIST_OFFLOADED); + if (IS_ENABLED(CONFIG_RCU_NOCB_CPU)) { + /* + * Complete de-offloading happens only when SEGCBLIST_SOFTIRQ_ONLY + * is set. + */ + if (!rcu_segcblist_test_flags(rsclp, SEGCBLIST_SOFTIRQ_ONLY)) + return true; + } + + return false; } /* diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 8086c0467c15..7cfc2e8a2500 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -83,6 +83,9 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = { .dynticks_nesting = 1, .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE, .dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR), +#ifdef CONFIG_RCU_NOCB_CPU + .cblist.flags = SEGCBLIST_SOFTIRQ_ONLY, +#endif }; static struct rcu_state rcu_state = { .level = { &rcu_state.node[0] }, From 126d9d49528dae792859e5f11f3b447ce8a9a9b4 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 13 Nov 2020 13:13:18 +0100 Subject: [PATCH 064/130] rcu/nocb: Always init segcblist on CPU up How the rdp->cblist enabled state is treated at CPU-hotplug time depends on whether or not that ->cblist is offloaded. 1) Not offloaded: The ->cblist is disabled when the CPU goes down. All its callbacks are migrated and none can be enqueued until after some later CPU-hotplug operation brings the CPU back up. 2) Offloaded: The ->cblist is not disabled on CPU down because the CB/GP kthreads must finish invoking the remaining callbacks. There is thus no need to re-enable it on CPU up. Since the ->cblist offloaded state is set in stone at boot, it cannot change between CPU down and CPU up. So 1) and 2) are symmetrical. However, given runtime toggling of the offloaded state, there are two additional asymmetrical scenarios: 3) The ->cblist is not offloaded when the CPU goes down. The ->cblist is later toggled to offloaded and then the CPU comes back up. 4) The ->cblist is offloaded when the CPU goes down. The ->cblist is later toggled to no longer be offloaded and then the CPU comes back up. Scenario 4) is currently handled correctly. The ->cblist remains enabled on CPU down and gets re-initialized on CPU up. The toggling operation will wait until ->cblist is empty, so ->cblist will remain empty until CPU-up time. Scenario 3) would run into trouble though, as the rdp is disabled on CPU down and not re-initialized/re-enabled on CPU up. Except that in this case, ->cblist is guaranteed to be empty because all its callbacks were migrated away at CPU-down time. And the CPU-up code already initializes and enables any empty ->cblist structures in order to handle the possibility of early-boot invocations of call_rcu() in the case where such invocations don't occur. So all that need be done is to adjust the locking. Cc: Josh Triplett Cc: Steven Rostedt Cc: Mathieu Desnoyers Cc: Lai Jiangshan Cc: Joel Fernandes Cc: Neeraj Upadhyay Cc: Thomas Gleixner Inspired-by: Paul E. McKenney Tested-by: Boqun Feng Signed-off-by: Frederic Weisbecker Signed-off-by: Paul E.
McKenney --- kernel/rcu/tree.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 7cfc2e8a2500..83362f6f1119 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -4015,12 +4015,18 @@ int rcutree_prepare_cpu(unsigned int cpu) rdp->qlen_last_fqs_check = 0; rdp->n_force_qs_snap = rcu_state.n_force_qs; rdp->blimit = blimit; - if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */ - !rcu_segcblist_is_offloaded(&rdp->cblist)) - rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */ rdp->dynticks_nesting = 1; /* CPU not up, no tearing. */ rcu_dynticks_eqs_online(); raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */ + /* + * Lock in case the CB/GP kthreads are still around handling + * old callbacks (longer term we should flush all callbacks + * before completing CPU offline) + */ + rcu_nocb_lock(rdp); + if (rcu_segcblist_empty(&rdp->cblist)) /* No early-boot CBs? */ + rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */ + rcu_nocb_unlock(rdp); /* * Add CPU to leaf rcu_node pending-online bitmask. Any needed From d97b078182406c0bd0aacd36fc0a693e118e608f Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 13 Nov 2020 13:13:19 +0100 Subject: [PATCH 065/130] rcu/nocb: De-offloading CB kthread To de-offload callback processing back onto a CPU, it is necessary to clear SEGCBLIST_OFFLOAD and notify the nocb CB kthread, which will then clear its own bit flag and go to sleep to stop handling callbacks. This commit makes that change. It will also be necessary to notify the nocb GP kthread in this same way, which is the subject of a follow-on commit. Cc: Josh Triplett Cc: Steven Rostedt Cc: Mathieu Desnoyers Cc: Lai Jiangshan Cc: Joel Fernandes Cc: Neeraj Upadhyay Cc: Thomas Gleixner Inspired-by: Paul E. McKenney Tested-by: Boqun Feng Signed-off-by: Frederic Weisbecker [ paulmck: Add export per kernel test robot feedback. ] Signed-off-by: Paul E. McKenney --- include/linux/rcupdate.h | 2 + kernel/rcu/rcu_segcblist.c | 10 ++- kernel/rcu/rcu_segcblist.h | 2 +- kernel/rcu/tree.h | 1 + kernel/rcu/tree_plugin.h | 130 ++++++++++++++++++++++++++++++++----- 5 files changed, 123 insertions(+), 22 deletions(-) diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index de0826411311..40266eb418b6 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -104,8 +104,10 @@ static inline void rcu_user_exit(void) { } #ifdef CONFIG_RCU_NOCB_CPU void rcu_init_nohz(void); +int rcu_nocb_cpu_deoffload(int cpu); #else /* #ifdef CONFIG_RCU_NOCB_CPU */ static inline void rcu_init_nohz(void) { } +static inline int rcu_nocb_cpu_deoffload(int cpu) { return 0; } #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ /** diff --git a/kernel/rcu/rcu_segcblist.c b/kernel/rcu/rcu_segcblist.c index 7fc6362625b2..7f181c9675f7 100644 --- a/kernel/rcu/rcu_segcblist.c +++ b/kernel/rcu/rcu_segcblist.c @@ -264,10 +264,14 @@ void rcu_segcblist_disable(struct rcu_segcblist *rsclp) * Mark the specified rcu_segcblist structure as offloaded. This * structure must be empty. 
*/ -void rcu_segcblist_offload(struct rcu_segcblist *rsclp) +void rcu_segcblist_offload(struct rcu_segcblist *rsclp, bool offload) { - rcu_segcblist_clear_flags(rsclp, SEGCBLIST_SOFTIRQ_ONLY); - rcu_segcblist_set_flags(rsclp, SEGCBLIST_OFFLOADED); + if (offload) { + rcu_segcblist_clear_flags(rsclp, SEGCBLIST_SOFTIRQ_ONLY); + rcu_segcblist_set_flags(rsclp, SEGCBLIST_OFFLOADED); + } else { + rcu_segcblist_clear_flags(rsclp, SEGCBLIST_OFFLOADED); + } } /* diff --git a/kernel/rcu/rcu_segcblist.h b/kernel/rcu/rcu_segcblist.h index e05952ab9b87..28c9a5225afc 100644 --- a/kernel/rcu/rcu_segcblist.h +++ b/kernel/rcu/rcu_segcblist.h @@ -109,7 +109,7 @@ void rcu_segcblist_inc_len(struct rcu_segcblist *rsclp); void rcu_segcblist_add_len(struct rcu_segcblist *rsclp, long v); void rcu_segcblist_init(struct rcu_segcblist *rsclp); void rcu_segcblist_disable(struct rcu_segcblist *rsclp); -void rcu_segcblist_offload(struct rcu_segcblist *rsclp); +void rcu_segcblist_offload(struct rcu_segcblist *rsclp, bool offload); bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp); bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp); struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp); diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 7708ed161f4a..e0deb4829847 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -201,6 +201,7 @@ struct rcu_data { /* 5) Callback offloading. */ #ifdef CONFIG_RCU_NOCB_CPU struct swait_queue_head nocb_cb_wq; /* For nocb kthreads to sleep on. */ + struct swait_queue_head nocb_state_wq; /* For offloading state changes */ struct task_struct *nocb_gp_kthread; raw_spinlock_t nocb_lock; /* Guard following pair of fields. */ atomic_t nocb_lock_contended; /* Contention experienced. */ diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 7e291ce0a1d6..1b870d0d2445 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -2081,16 +2081,29 @@ static int rcu_nocb_gp_kthread(void *arg) return 0; } +static inline bool nocb_cb_can_run(struct rcu_data *rdp) +{ + u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_CB; + return rcu_segcblist_test_flags(&rdp->cblist, flags); +} + +static inline bool nocb_cb_wait_cond(struct rcu_data *rdp) +{ + return nocb_cb_can_run(rdp) && !READ_ONCE(rdp->nocb_cb_sleep); +} + /* * Invoke any ready callbacks from the corresponding no-CBs CPU, * then, if there are no more, wait for more to appear. */ static void nocb_cb_wait(struct rcu_data *rdp) { + struct rcu_segcblist *cblist = &rdp->cblist; + struct rcu_node *rnp = rdp->mynode; + bool needwake_state = false; + bool needwake_gp = false; unsigned long cur_gp_seq; unsigned long flags; - bool needwake_gp = false; - struct rcu_node *rnp = rdp->mynode; local_irq_save(flags); rcu_momentary_dyntick_idle(); @@ -2100,32 +2113,50 @@ static void nocb_cb_wait(struct rcu_data *rdp) local_bh_enable(); lockdep_assert_irqs_enabled(); rcu_nocb_lock_irqsave(rdp, flags); - if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) && + if (rcu_segcblist_nextgp(cblist, &cur_gp_seq) && rcu_seq_done(&rnp->gp_seq, cur_gp_seq) && raw_spin_trylock_rcu_node(rnp)) { /* irqs already disabled. */ needwake_gp = rcu_advance_cbs(rdp->mynode, rdp); raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. 
*/ } - if (rcu_segcblist_ready_cbs(&rdp->cblist)) { - rcu_nocb_unlock_irqrestore(rdp, flags); - if (needwake_gp) - rcu_gp_kthread_wake(); - return; + + WRITE_ONCE(rdp->nocb_cb_sleep, true); + + if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) { + if (rcu_segcblist_ready_cbs(cblist)) + WRITE_ONCE(rdp->nocb_cb_sleep, false); + } else { + /* + * De-offloading. Clear our flag and notify the de-offload worker. + * We won't touch the callbacks and keep sleeping until we ever + * get re-offloaded. + */ + WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB)); + rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_CB); + if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) + needwake_state = true; } - trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep")); - WRITE_ONCE(rdp->nocb_cb_sleep, true); + if (rdp->nocb_cb_sleep) + trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep")); + rcu_nocb_unlock_irqrestore(rdp, flags); if (needwake_gp) rcu_gp_kthread_wake(); - swait_event_interruptible_exclusive(rdp->nocb_cb_wq, - !READ_ONCE(rdp->nocb_cb_sleep)); - if (!smp_load_acquire(&rdp->nocb_cb_sleep)) { /* VVV */ + + if (needwake_state) + swake_up_one(&rdp->nocb_state_wq); + + do { + swait_event_interruptible_exclusive(rdp->nocb_cb_wq, + nocb_cb_wait_cond(rdp)); + /* ^^^ Ensure CB invocation follows _sleep test. */ - return; - } - WARN_ON(signal_pending(current)); - trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty")); + if (smp_load_acquire(&rdp->nocb_cb_sleep)) { + WARN_ON(signal_pending(current)); + trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty")); + } + } while (!nocb_cb_can_run(rdp)); } /* @@ -2187,6 +2218,67 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp) do_nocb_deferred_wakeup_common(rdp); } +static int __rcu_nocb_rdp_deoffload(struct rcu_data *rdp) +{ + struct rcu_segcblist *cblist = &rdp->cblist; + bool wake_cb = false; + unsigned long flags; + + printk("De-offloading %d\n", rdp->cpu); + + rcu_nocb_lock_irqsave(rdp, flags); + rcu_segcblist_offload(cblist, false); + + if (rdp->nocb_cb_sleep) { + rdp->nocb_cb_sleep = false; + wake_cb = true; + } + rcu_nocb_unlock_irqrestore(rdp, flags); + + if (wake_cb) + swake_up_one(&rdp->nocb_cb_wq); + + swait_event_exclusive(rdp->nocb_state_wq, + !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB)); + + return 0; +} + +static long rcu_nocb_rdp_deoffload(void *arg) +{ + struct rcu_data *rdp = arg; + + WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id()); + return __rcu_nocb_rdp_deoffload(rdp); +} + +int rcu_nocb_cpu_deoffload(int cpu) +{ + struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); + int ret = 0; + + if (rdp == rdp->nocb_gp_rdp) { + pr_info("Can't deoffload an rdp GP leader (yet)\n"); + return -EINVAL; + } + mutex_lock(&rcu_state.barrier_mutex); + cpus_read_lock(); + if (rcu_segcblist_is_offloaded(&rdp->cblist)) { + if (cpu_online(cpu)) { + ret = work_on_cpu(cpu, rcu_nocb_rdp_deoffload, rdp); + } else { + ret = __rcu_nocb_rdp_deoffload(rdp); + } + if (!ret) + cpumask_clear_cpu(cpu, rcu_nocb_mask); + } + cpus_read_unlock(); + mutex_unlock(&rcu_state.barrier_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(rcu_nocb_cpu_deoffload); + void __init rcu_init_nohz(void) { int cpu; @@ -2229,7 +2321,8 @@ void __init rcu_init_nohz(void) rdp = per_cpu_ptr(&rcu_data, cpu); if (rcu_segcblist_empty(&rdp->cblist)) rcu_segcblist_init(&rdp->cblist); - rcu_segcblist_offload(&rdp->cblist); + rcu_segcblist_offload(&rdp->cblist, true); + rcu_segcblist_set_flags(&rdp->cblist, 
SEGCBLIST_KTHREAD_CB); } rcu_organize_nocb_kthreads(); } @@ -2239,6 +2332,7 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp) { init_swait_queue_head(&rdp->nocb_cb_wq); init_swait_queue_head(&rdp->nocb_gp_wq); + init_swait_queue_head(&rdp->nocb_state_wq); raw_spin_lock_init(&rdp->nocb_lock); raw_spin_lock_init(&rdp->nocb_bypass_lock); raw_spin_lock_init(&rdp->nocb_gp_lock); From ef005345e6e49859e225f549c88c985e79477bb9 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 13 Nov 2020 13:13:20 +0100 Subject: [PATCH 066/130] rcu/nocb: Don't deoffload an offline CPU with pending work Offloaded CPUs do not migrate their callbacks, instead relying on their rcuo kthread to invoke them. But if the CPU is offline, it will be running neither its RCU_SOFTIRQ handler nor its rcuc kthread. This means that de-offloading an offline CPU that still has pending callbacks will strand those callbacks. This commit therefore refuses to toggle offline CPUs having pending callbacks. Cc: Josh Triplett Cc: Steven Rostedt Cc: Mathieu Desnoyers Cc: Lai Jiangshan Cc: Joel Fernandes Cc: Neeraj Upadhyay Cc: Thomas Gleixner Suggested-by: Paul E. McKenney Tested-by: Boqun Feng Signed-off-by: Frederic Weisbecker Signed-off-by: Paul E. McKenney --- kernel/rcu/tree_plugin.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 1b870d0d2445..b70cc91a7831 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -2227,6 +2227,15 @@ static int __rcu_nocb_rdp_deoffload(struct rcu_data *rdp) printk("De-offloading %d\n", rdp->cpu); rcu_nocb_lock_irqsave(rdp, flags); + /* + * If there are still pending work offloaded, the offline + * CPU won't help much handling them. + */ + if (cpu_is_offline(rdp->cpu) && !rcu_segcblist_empty(&rdp->cblist)) { + rcu_nocb_unlock_irqrestore(rdp, flags); + return -EBUSY; + } + rcu_segcblist_offload(cblist, false); if (rdp->nocb_cb_sleep) { From 5bb39dc956f3d4f1bb75b5962b503426c45340ae Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 13 Nov 2020 13:13:21 +0100 Subject: [PATCH 067/130] rcu/nocb: De-offloading GP kthread To de-offload callback processing back onto a CPU, it is necessary to clear SEGCBLIST_OFFLOAD and notify the nocb GP kthread, which will then clear its own bit flag and ignore this CPU until further notice. Whichever of the nocb CB and nocb GP kthreads is last to clear its own bit notifies the de-offloading worker kthread. Once notified, this worker kthread can proceed safe in the knowledge that the nocb CB and GP kthreads will no longer be manipulating this CPU's RCU callback list. This commit makes this change. Cc: Josh Triplett Cc: Steven Rostedt Cc: Mathieu Desnoyers Cc: Lai Jiangshan Cc: Joel Fernandes Cc: Neeraj Upadhyay Cc: Thomas Gleixner Inspired-by: Paul E. McKenney Tested-by: Boqun Feng Signed-off-by: Frederic Weisbecker Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree_plugin.h | 54 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 51 insertions(+), 3 deletions(-) diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index b70cc91a7831..fe46e7008667 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1928,6 +1928,33 @@ static void do_nocb_bypass_wakeup_timer(struct timer_list *t) __call_rcu_nocb_wake(rdp, true, flags); } +static inline bool nocb_gp_enabled_cb(struct rcu_data *rdp) +{ + u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_GP; + + return rcu_segcblist_test_flags(&rdp->cblist, flags); +} + +static inline bool nocb_gp_update_state(struct rcu_data *rdp, bool *needwake_state) +{ + struct rcu_segcblist *cblist = &rdp->cblist; + + if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) { + return true; + } else { + /* + * De-offloading. Clear our flag and notify the de-offload worker. + * We will ignore this rdp until it ever gets re-offloaded. + */ + WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)); + rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP); + if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB)) + *needwake_state = true; + return false; + } +} + + /* * No-CBs GP kthreads come here to wait for additional callbacks to show up * or for grace periods to end. @@ -1956,8 +1983,17 @@ static void nocb_gp_wait(struct rcu_data *my_rdp) */ WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp); for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) { + bool needwake_state = false; + if (!nocb_gp_enabled_cb(rdp)) + continue; trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check")); rcu_nocb_lock_irqsave(rdp, flags); + if (!nocb_gp_update_state(rdp, &needwake_state)) { + rcu_nocb_unlock_irqrestore(rdp, flags); + if (needwake_state) + swake_up_one(&rdp->nocb_state_wq); + continue; + } bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass); if (bypass_ncbs && (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) || @@ -2221,7 +2257,8 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp) static int __rcu_nocb_rdp_deoffload(struct rcu_data *rdp) { struct rcu_segcblist *cblist = &rdp->cblist; - bool wake_cb = false; + struct rcu_data *rdp_gp = rdp->nocb_gp_rdp; + bool wake_cb = false, wake_gp = false; unsigned long flags; printk("De-offloading %d\n", rdp->cpu); @@ -2247,9 +2284,19 @@ static int __rcu_nocb_rdp_deoffload(struct rcu_data *rdp) if (wake_cb) swake_up_one(&rdp->nocb_cb_wq); - swait_event_exclusive(rdp->nocb_state_wq, - !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB)); + raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags); + if (rdp_gp->nocb_gp_sleep) { + rdp_gp->nocb_gp_sleep = false; + wake_gp = true; + } + raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags); + if (wake_gp) + wake_up_process(rdp_gp->nocb_gp_kthread); + + swait_event_exclusive(rdp->nocb_state_wq, + !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB | + SEGCBLIST_KTHREAD_GP)); return 0; } @@ -2332,6 +2379,7 @@ void __init rcu_init_nohz(void) rcu_segcblist_init(&rdp->cblist); rcu_segcblist_offload(&rdp->cblist, true); rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB); + rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP); } rcu_organize_nocb_kthreads(); } From 254e11efde66ca0a0ce0c99a62c377314b5984ff Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 13 Nov 2020 13:13:22 +0100 Subject: [PATCH 068/130] rcu/nocb: Re-offload support To re-offload the callback processing off of a CPU, it is necessary to clear SEGCBLIST_SOFTIRQ_ONLY, set 
SEGCBLIST_OFFLOADED, and then notify both the CB and GP kthreads so that they both set their own bit flag and start processing the callbacks remotely. The re-offloading worker is then notified that it can stop the RCU_SOFTIRQ handler (or rcuc kthread, as the case may be) from processing the callbacks locally. Ordering must be carefully enforced so that the callbacks that used to be processed locally without locking will have the same ordering properties when they are invoked by the nocb CB and GP kthreads. This commit makes this change. Cc: Josh Triplett Cc: Steven Rostedt Cc: Mathieu Desnoyers Cc: Lai Jiangshan Cc: Joel Fernandes Cc: Neeraj Upadhyay Cc: Thomas Gleixner Inspired-by: Paul E. McKenney Tested-by: Boqun Feng Signed-off-by: Frederic Weisbecker [ paulmck: Export rcu_nocb_cpu_offload(). ] Signed-off-by: Paul E. McKenney --- include/linux/rcupdate.h | 2 + kernel/rcu/tree_plugin.h | 162 +++++++++++++++++++++++++++++++++------ 2 files changed, 140 insertions(+), 24 deletions(-) diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 40266eb418b6..e0ee52e2756d 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -104,9 +104,11 @@ static inline void rcu_user_exit(void) { } #ifdef CONFIG_RCU_NOCB_CPU void rcu_init_nohz(void); +int rcu_nocb_cpu_offload(int cpu); int rcu_nocb_cpu_deoffload(int cpu); #else /* #ifdef CONFIG_RCU_NOCB_CPU */ static inline void rcu_init_nohz(void) { } +static inline int rcu_nocb_cpu_offload(int cpu) { return -EINVAL; } static inline int rcu_nocb_cpu_deoffload(int cpu) { return 0; } #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index fe46e7008667..03ae1ce4790f 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1928,6 +1928,20 @@ static void do_nocb_bypass_wakeup_timer(struct timer_list *t) __call_rcu_nocb_wake(rdp, true, flags); } +/* + * Check if we ignore this rdp. + * + * We check that without holding the nocb lock but + * we make sure not to miss a freshly offloaded rdp + * with the current ordering: + * + * rdp_offload_toggle() nocb_gp_enabled_cb() + * ------------------------- ---------------------------- + * WRITE flags LOCK nocb_gp_lock + * LOCK nocb_gp_lock READ/WRITE nocb_gp_sleep + * READ/WRITE nocb_gp_sleep UNLOCK nocb_gp_lock + * UNLOCK nocb_gp_lock READ flags + */ static inline bool nocb_gp_enabled_cb(struct rcu_data *rdp) { u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_GP; @@ -1940,6 +1954,11 @@ static inline bool nocb_gp_update_state(struct rcu_data *rdp, bool *needwake_sta struct rcu_segcblist *cblist = &rdp->cblist; if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) { + if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) { + rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_GP); + if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB)) + *needwake_state = true; + } return true; } else { /* @@ -2003,6 +2022,8 @@ static void nocb_gp_wait(struct rcu_data *my_rdp) bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass); } else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) { rcu_nocb_unlock_irqrestore(rdp, flags); + if (needwake_state) + swake_up_one(&rdp->nocb_state_wq); continue; /* No callbacks here, try next. 
*/ } if (bypass_ncbs) { @@ -2054,6 +2075,8 @@ static void nocb_gp_wait(struct rcu_data *my_rdp) } if (needwake_gp) rcu_gp_kthread_wake(); + if (needwake_state) + swake_up_one(&rdp->nocb_state_wq); } my_rdp->nocb_gp_bypass = bypass; @@ -2159,6 +2182,11 @@ static void nocb_cb_wait(struct rcu_data *rdp) WRITE_ONCE(rdp->nocb_cb_sleep, true); if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) { + if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB)) { + rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_CB); + if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) + needwake_state = true; + } if (rcu_segcblist_ready_cbs(cblist)) WRITE_ONCE(rdp->nocb_cb_sleep, false); } else { @@ -2254,12 +2282,44 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp) do_nocb_deferred_wakeup_common(rdp); } -static int __rcu_nocb_rdp_deoffload(struct rcu_data *rdp) +static int rdp_offload_toggle(struct rcu_data *rdp, + bool offload, unsigned long flags) + __releases(rdp->nocb_lock) { struct rcu_segcblist *cblist = &rdp->cblist; struct rcu_data *rdp_gp = rdp->nocb_gp_rdp; - bool wake_cb = false, wake_gp = false; + bool wake_gp = false; + + rcu_segcblist_offload(cblist, offload); + + if (rdp->nocb_cb_sleep) + rdp->nocb_cb_sleep = false; + rcu_nocb_unlock_irqrestore(rdp, flags); + + /* + * Ignore former value of nocb_cb_sleep and force wake up as it could + * have been spuriously set to false already. + */ + swake_up_one(&rdp->nocb_cb_wq); + + raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags); + if (rdp_gp->nocb_gp_sleep) { + rdp_gp->nocb_gp_sleep = false; + wake_gp = true; + } + raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags); + + if (wake_gp) + wake_up_process(rdp_gp->nocb_gp_kthread); + + return 0; +} + +static int __rcu_nocb_rdp_deoffload(struct rcu_data *rdp) +{ + struct rcu_segcblist *cblist = &rdp->cblist; unsigned long flags; + int ret; printk("De-offloading %d\n", rdp->cpu); @@ -2273,31 +2333,11 @@ static int __rcu_nocb_rdp_deoffload(struct rcu_data *rdp) return -EBUSY; } - rcu_segcblist_offload(cblist, false); - - if (rdp->nocb_cb_sleep) { - rdp->nocb_cb_sleep = false; - wake_cb = true; - } - rcu_nocb_unlock_irqrestore(rdp, flags); - - if (wake_cb) - swake_up_one(&rdp->nocb_cb_wq); - - raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags); - if (rdp_gp->nocb_gp_sleep) { - rdp_gp->nocb_gp_sleep = false; - wake_gp = true; - } - raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags); - - if (wake_gp) - wake_up_process(rdp_gp->nocb_gp_kthread); - + ret = rdp_offload_toggle(rdp, false, flags); swait_event_exclusive(rdp->nocb_state_wq, !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP)); - return 0; + return ret; } static long rcu_nocb_rdp_deoffload(void *arg) @@ -2335,6 +2375,80 @@ int rcu_nocb_cpu_deoffload(int cpu) } EXPORT_SYMBOL_GPL(rcu_nocb_cpu_deoffload); +static int __rcu_nocb_rdp_offload(struct rcu_data *rdp) +{ + struct rcu_segcblist *cblist = &rdp->cblist; + unsigned long flags; + int ret; + + /* + * For now we only support re-offload, ie: the rdp must have been + * offloaded on boot first. + */ + if (!rdp->nocb_gp_rdp) + return -EINVAL; + + printk("Offloading %d\n", rdp->cpu); + /* + * Can't use rcu_nocb_lock_irqsave() while we are in + * SEGCBLIST_SOFTIRQ_ONLY mode. + */ + raw_spin_lock_irqsave(&rdp->nocb_lock, flags); + /* + * We didn't take the nocb lock while working on the + * rdp->cblist in SEGCBLIST_SOFTIRQ_ONLY mode. 
+ * Every modifications that have been done previously on + * rdp->cblist must be visible remotely by the nocb kthreads + * upon wake up after reading the cblist flags. + * + * The layout against nocb_lock enforces that ordering: + * + * __rcu_nocb_rdp_offload() nocb_cb_wait()/nocb_gp_wait() + * ------------------------- ---------------------------- + * WRITE callbacks rcu_nocb_lock() + * rcu_nocb_lock() READ flags + * WRITE flags READ callbacks + * rcu_nocb_unlock() rcu_nocb_unlock() + */ + ret = rdp_offload_toggle(rdp, true, flags); + swait_event_exclusive(rdp->nocb_state_wq, + rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB) && + rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)); + + return ret; +} + +static long rcu_nocb_rdp_offload(void *arg) +{ + struct rcu_data *rdp = arg; + + WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id()); + return __rcu_nocb_rdp_offload(rdp); +} + +int rcu_nocb_cpu_offload(int cpu) +{ + struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); + int ret = 0; + + mutex_lock(&rcu_state.barrier_mutex); + cpus_read_lock(); + if (!rcu_segcblist_is_offloaded(&rdp->cblist)) { + if (cpu_online(cpu)) { + ret = work_on_cpu(cpu, rcu_nocb_rdp_offload, rdp); + } else { + ret = __rcu_nocb_rdp_offload(rdp); + } + if (!ret) + cpumask_set_cpu(cpu, rcu_nocb_mask); + } + cpus_read_unlock(); + mutex_unlock(&rcu_state.barrier_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(rcu_nocb_cpu_offload); + void __init rcu_init_nohz(void) { int cpu; From 69cdea873cde261586a2cae2440178df1a313bbe Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 13 Nov 2020 13:13:23 +0100 Subject: [PATCH 069/130] rcu/nocb: Shutdown nocb timer on de-offloading This commit ensures that the nocb timer is shut down before reaching the final de-offloaded state. The key goal is to prevent the timer handler from manipulating the callbacks without the protection of the nocb locks. Cc: Josh Triplett Cc: Steven Rostedt Cc: Mathieu Desnoyers Cc: Lai Jiangshan Cc: Joel Fernandes Cc: Neeraj Upadhyay Cc: Thomas Gleixner Inspired-by: Paul E. McKenney Tested-by: Boqun Feng Signed-off-by: Frederic Weisbecker Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.h | 1 + kernel/rcu/tree_plugin.h | 12 +++++++++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index e0deb4829847..5d359b9f9fec 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -257,6 +257,7 @@ struct rcu_data { }; /* Values for nocb_defer_wakeup field in struct rcu_data. */ +#define RCU_NOCB_WAKE_OFF -1 #define RCU_NOCB_WAKE_NOT 0 #define RCU_NOCB_WAKE 1 #define RCU_NOCB_WAKE_FORCE 2 diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 03ae1ce4790f..c88ad628595b 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1665,6 +1665,8 @@ static void wake_nocb_gp(struct rcu_data *rdp, bool force, static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype, const char *reason) { + if (rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_OFF) + return; if (rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT) mod_timer(&rdp->nocb_timer, jiffies + 1); if (rdp->nocb_defer_wakeup < waketype) @@ -2243,7 +2245,7 @@ static int rcu_nocb_cb_kthread(void *arg) /* Is a deferred wakeup of rcu_nocb_kthread() required? */ static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp) { - return READ_ONCE(rdp->nocb_defer_wakeup); + return READ_ONCE(rdp->nocb_defer_wakeup) > RCU_NOCB_WAKE_NOT; } /* Do a deferred wakeup of rcu_nocb_kthread(). 
*/ @@ -2337,6 +2339,12 @@ static int __rcu_nocb_rdp_deoffload(struct rcu_data *rdp) swait_event_exclusive(rdp->nocb_state_wq, !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP)); + /* Make sure nocb timer won't stay around */ + rcu_nocb_lock_irqsave(rdp, flags); + WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_OFF); + rcu_nocb_unlock_irqrestore(rdp, flags); + del_timer_sync(&rdp->nocb_timer); + return ret; } @@ -2394,6 +2402,8 @@ static int __rcu_nocb_rdp_offload(struct rcu_data *rdp) * SEGCBLIST_SOFTIRQ_ONLY mode. */ raw_spin_lock_irqsave(&rdp->nocb_lock, flags); + /* Re-enable nocb timer */ + WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT); /* * We didn't take the nocb lock while working on the * rdp->cblist in SEGCBLIST_SOFTIRQ_ONLY mode. From 314202f84ddd61e4d7576ef62570ad2e2d9db06b Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 13 Nov 2020 13:13:24 +0100 Subject: [PATCH 070/130] rcu/nocb: Flush bypass before setting SEGCBLIST_SOFTIRQ_ONLY This commit flushes the bypass queue and sets state to avoid its being refilled before switching to the final de-offloaded state. To avoid refilling, this commit sets SEGCBLIST_SOFTIRQ_ONLY before re-enabling IRQs. Cc: Josh Triplett Cc: Steven Rostedt Cc: Mathieu Desnoyers Cc: Lai Jiangshan Cc: Joel Fernandes Cc: Neeraj Upadhyay Cc: Thomas Gleixner Inspired-by: Paul E. McKenney Tested-by: Boqun Feng Signed-off-by: Frederic Weisbecker Signed-off-by: Paul E. McKenney --- kernel/rcu/tree_plugin.h | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index c88ad628595b..35dc9b31e9a4 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -2339,12 +2339,21 @@ static int __rcu_nocb_rdp_deoffload(struct rcu_data *rdp) swait_event_exclusive(rdp->nocb_state_wq, !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP)); - /* Make sure nocb timer won't stay around */ rcu_nocb_lock_irqsave(rdp, flags); + /* Make sure nocb timer won't stay around */ WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_OFF); rcu_nocb_unlock_irqrestore(rdp, flags); del_timer_sync(&rdp->nocb_timer); + /* + * Flush bypass. While IRQs are disabled and once we set + * SEGCBLIST_SOFTIRQ_ONLY, no callback is supposed to be + * enqueued on bypass. + */ + rcu_nocb_lock_irqsave(rdp, flags); + rcu_nocb_flush_bypass(rdp, NULL, jiffies); + rcu_nocb_unlock_irqrestore(rdp, flags); + return ret; } From b9ced9e1ab51ed6057ac8198fd1eeb404a32a867 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 13 Nov 2020 13:13:25 +0100 Subject: [PATCH 071/130] rcu/nocb: Set SEGCBLIST_SOFTIRQ_ONLY at the very last stage of de-offloading This commit sets SEGCBLIST_SOFTIRQ_ONLY once toggling is otherwise fully complete, allowing further RCU callback manipulation to be carried out locklessly and locally. Cc: Josh Triplett Cc: Steven Rostedt Cc: Mathieu Desnoyers Cc: Lai Jiangshan Cc: Joel Fernandes Cc: Neeraj Upadhyay Cc: Thomas Gleixner Inspired-by: Paul E. McKenney Tested-by: Boqun Feng Signed-off-by: Frederic Weisbecker Signed-off-by: Paul E. 
McKenney --- kernel/rcu/tree_plugin.h | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 35dc9b31e9a4..8641b72f6d0d 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -2352,7 +2352,14 @@ static int __rcu_nocb_rdp_deoffload(struct rcu_data *rdp) */ rcu_nocb_lock_irqsave(rdp, flags); rcu_nocb_flush_bypass(rdp, NULL, jiffies); - rcu_nocb_unlock_irqrestore(rdp, flags); + rcu_segcblist_set_flags(cblist, SEGCBLIST_SOFTIRQ_ONLY); + /* + * With SEGCBLIST_SOFTIRQ_ONLY, we can't use + * rcu_nocb_unlock_irqrestore() anymore. Theoretically we + * could set SEGCBLIST_SOFTIRQ_ONLY with cb unlocked and IRQs + * disabled now, but let's be paranoid. + */ + raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); return ret; } From e3abe959fbd57aa751bc533677a35c411cee9b16 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 13 Nov 2020 13:13:26 +0100 Subject: [PATCH 072/130] rcu/nocb: Only cond_resched() from actual offloaded batch processing During a toggle operations, rcu_do_batch() may be invoked concurrently by softirqs and offloaded processing for a given CPU's callbacks. This commit therefore makes sure cond_resched() is invoked only from the offloaded context. Cc: Josh Triplett Cc: Steven Rostedt Cc: Mathieu Desnoyers Cc: Lai Jiangshan Cc: Joel Fernandes Cc: Neeraj Upadhyay Cc: Thomas Gleixner Inspired-by: Paul E. McKenney Tested-by: Boqun Feng Signed-off-by: Frederic Weisbecker Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 83362f6f1119..4ef59a5416a3 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2516,8 +2516,7 @@ static void rcu_do_batch(struct rcu_data *rdp) /* Exceeded the time limit, so leave. */ break; } - if (offloaded) { - WARN_ON_ONCE(in_serving_softirq()); + if (!in_serving_softirq()) { local_bh_enable(); lockdep_assert_irqs_enabled(); cond_resched_tasks_rcu_qs(); From 32aa2f4170d22f0b9fcb75ab05679ab122fae373 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 13 Nov 2020 13:13:27 +0100 Subject: [PATCH 073/130] rcu/nocb: Process batch locally as long as offloading isn't complete This commit makes sure to process the callbacks locally (via either RCU_SOFTIRQ or the rcuc kthread) whenever the segcblist isn't entirely offloaded. This ensures that callbacks are invoked one way or another while a CPU is in the middle of a toggle operation. Cc: Josh Triplett Cc: Steven Rostedt Cc: Mathieu Desnoyers Cc: Lai Jiangshan Cc: Joel Fernandes Cc: Neeraj Upadhyay Cc: Thomas Gleixner Inspired-by: Paul E. McKenney Tested-by: Boqun Feng Signed-off-by: Frederic Weisbecker Signed-off-by: Paul E. 
McKenney --- kernel/rcu/rcu_segcblist.h | 12 ++++++++++++ kernel/rcu/tree.c | 3 ++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/kernel/rcu/rcu_segcblist.h b/kernel/rcu/rcu_segcblist.h index 28c9a5225afc..afad6fc6311c 100644 --- a/kernel/rcu/rcu_segcblist.h +++ b/kernel/rcu/rcu_segcblist.h @@ -95,6 +95,18 @@ static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp) return false; } +static inline bool rcu_segcblist_completely_offloaded(struct rcu_segcblist *rsclp) +{ + int flags = SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP | SEGCBLIST_OFFLOADED; + + if (IS_ENABLED(CONFIG_RCU_NOCB_CPU)) { + if ((rsclp->flags & flags) == flags) + return true; + } + + return false; +} + /* * Are all segments following the specified segment of the specified * rcu_segcblist structure empty of callbacks? (The specified diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 4ef59a5416a3..ec14c017c0e3 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2700,6 +2700,7 @@ static __latent_entropy void rcu_core(void) struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); struct rcu_node *rnp = rdp->mynode; const bool offloaded = rcu_segcblist_is_offloaded(&rdp->cblist); + const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist); if (cpu_is_offline(smp_processor_id())) return; @@ -2729,7 +2730,7 @@ static __latent_entropy void rcu_core(void) rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check()); /* If there are callbacks ready, invoke them. */ - if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist) && + if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) && likely(READ_ONCE(rcu_scheduler_fully_active))) rcu_do_batch(rdp); From 634954c2dbf88e67aa267798f60af6b9a476cf4b Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 13 Nov 2020 13:13:28 +0100 Subject: [PATCH 074/130] rcu/nocb: Locally accelerate callbacks as long as offloading isn't complete The local callbacks processing checks if any callbacks need acceleration. This commit carries out this checking under nocb lock protection in the middle of toggle operations, during which time rcu_core() executes concurrently with GP/CB kthreads. Cc: Josh Triplett Cc: Steven Rostedt Cc: Mathieu Desnoyers Cc: Lai Jiangshan Cc: Joel Fernandes Cc: Neeraj Upadhyay Cc: Thomas Gleixner Inspired-by: Paul E. McKenney Tested-by: Boqun Feng Signed-off-by: Frederic Weisbecker Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index ec14c017c0e3..03810a58fdd0 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2699,7 +2699,6 @@ static __latent_entropy void rcu_core(void) unsigned long flags; struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); struct rcu_node *rnp = rdp->mynode; - const bool offloaded = rcu_segcblist_is_offloaded(&rdp->cblist); const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist); if (cpu_is_offline(smp_processor_id())) @@ -2720,11 +2719,11 @@ static __latent_entropy void rcu_core(void) /* No grace period and unregistered callbacks? 
*/ if (!rcu_gp_in_progress() && - rcu_segcblist_is_enabled(&rdp->cblist) && !offloaded) { - local_irq_save(flags); + rcu_segcblist_is_enabled(&rdp->cblist) && do_batch) { + rcu_nocb_lock_irqsave(rdp, flags); if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) rcu_accelerate_cbs_unlocked(rnp, rdp); - local_irq_restore(flags); + rcu_nocb_unlock_irqrestore(rdp, flags); } rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check()); From 43759fe5a137389e94ed6d4680c3c63c17273158 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 11 Nov 2020 23:53:13 +0100 Subject: [PATCH 075/130] cpu/hotplug: Add lockdep_is_cpus_held() This commit adds a lockdep_is_cpus_held() function to verify that the proper locks are held and that various operations are running in the correct context. Signed-off-by: Frederic Weisbecker Cc: Paul E. McKenney Cc: Josh Triplett Cc: Steven Rostedt Cc: Mathieu Desnoyers Cc: Lai Jiangshan Cc: Joel Fernandes Cc: Neeraj Upadhyay Cc: Thomas Gleixner Cc: Boqun Feng Signed-off-by: Paul E. McKenney --- include/linux/cpu.h | 2 ++ kernel/cpu.c | 7 +++++++ 2 files changed, 9 insertions(+) diff --git a/include/linux/cpu.h b/include/linux/cpu.h index d6428aaf67e7..3aaa0687e8df 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -111,6 +111,8 @@ static inline void cpu_maps_update_done(void) #endif /* CONFIG_SMP */ extern struct bus_type cpu_subsys; +extern int lockdep_is_cpus_held(void); + #ifdef CONFIG_HOTPLUG_CPU extern void cpus_write_lock(void); extern void cpus_write_unlock(void); diff --git a/kernel/cpu.c b/kernel/cpu.c index 4e11e91010e1..1b6302ecbabe 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -330,6 +330,13 @@ void lockdep_assert_cpus_held(void) percpu_rwsem_assert_held(&cpu_hotplug_lock); } +#ifdef CONFIG_LOCKDEP +int lockdep_is_cpus_held(void) +{ + return percpu_rwsem_is_held(&cpu_hotplug_lock); +} +#endif + static void lockdep_acquire_cpus_lock(void) { rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_); From dcd42591ebb8a25895b551a5297ea9c24414ba54 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 13 Nov 2020 13:13:33 +0100 Subject: [PATCH 076/130] timer: Add timer_curr_running() This commit adds a timer_curr_running() function that verifies that the current code is running in the context of the specified timer's handler. Cc: Josh Triplett Cc: Steven Rostedt Cc: Mathieu Desnoyers Cc: Lai Jiangshan Cc: Joel Fernandes Cc: Neeraj Upadhyay Cc: Thomas Gleixner Tested-by: Boqun Feng Signed-off-by: Frederic Weisbecker Signed-off-by: Paul E. 
McKenney --- include/linux/timer.h | 2 ++ kernel/time/timer.c | 13 +++++++++++++ 2 files changed, 15 insertions(+) diff --git a/include/linux/timer.h b/include/linux/timer.h index fda13c9d1256..4118a97e62fb 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -192,6 +192,8 @@ extern int try_to_del_timer_sync(struct timer_list *timer); #define del_singleshot_timer_sync(t) del_timer_sync(t) +extern bool timer_curr_running(struct timer_list *timer); + extern void init_timers(void); struct hrtimer; extern enum hrtimer_restart it_real_fn(struct hrtimer *); diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 8dbc008f8942..f9b2096456e5 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -1237,6 +1237,19 @@ int try_to_del_timer_sync(struct timer_list *timer) } EXPORT_SYMBOL(try_to_del_timer_sync); +bool timer_curr_running(struct timer_list *timer) +{ + int i; + + for (i = 0; i < NR_BASES; i++) { + struct timer_base *base = this_cpu_ptr(&timer_bases[i]); + if (base->running_timer == timer) + return true; + } + + return false; +} + #ifdef CONFIG_PREEMPT_RT static __init void timer_base_init_expiry_lock(struct timer_base *base) { From 2c4319bd1d14d01f5b6654a90c2b6362f3a407d8 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 23 Sep 2020 17:39:46 -0700 Subject: [PATCH 077/130] rcutorture: Test runtime toggling of CPUs' callback offloading Frederic Weisbecker is adding the ability to change the rcu_nocbs state of CPUs at runtime, that is, to offload and deoffload their RCU callback processing without the need to reboot. As the old saying goes, "if it ain't tested, it don't work", so this commit therefore adds prototype rcutorture testing for this capability. Signed-off-by: Paul E. McKenney Cc: Frederic Weisbecker --- .../admin-guide/kernel-parameters.txt | 8 ++ kernel/rcu/rcutorture.c | 90 ++++++++++++++++++- 2 files changed, 95 insertions(+), 3 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index c722ec19cd00..9f8ac776ae41 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -4338,6 +4338,14 @@ stress RCU, they don't participate in the actual test, hence the "fake". + rcutorture.nocbs_nthreads= [KNL] + Set number of RCU callback-offload togglers. + Zero (the default) disables toggling. + + rcutorture.nocbs_toggle= [KNL] + Set the delay in milliseconds between successive + callback-offload toggling attempts. + rcutorture.nreaders= [KNL] Set number of RCU readers. The value -1 selects N-1, where N is the number of CPUs. 
A value diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 528ed10b78fd..22735bc3eacc 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -97,6 +97,8 @@ torture_param(int, object_debug, 0, torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)"); torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable"); +torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable"); +torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)"); torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)"); torture_param(int, read_exit_burst, 16, @@ -127,10 +129,12 @@ static char *torture_type = "rcu"; module_param(torture_type, charp, 0444); MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)"); +static int nrealnocbers; static int nrealreaders; static struct task_struct *writer_task; static struct task_struct **fakewriter_tasks; static struct task_struct **reader_tasks; +static struct task_struct **nocb_tasks; static struct task_struct *stats_task; static struct task_struct *fqs_task; static struct task_struct *boost_tasks[NR_CPUS]; @@ -174,6 +178,8 @@ static unsigned long n_read_exits; static struct list_head rcu_torture_removed; static unsigned long shutdown_jiffies; static unsigned long start_gp_seq; +static atomic_long_t n_nocb_offload; +static atomic_long_t n_nocb_deoffload; static int rcu_torture_writer_state; #define RTWS_FIXED_DELAY 0 @@ -1498,6 +1504,53 @@ rcu_torture_reader(void *arg) return 0; } +/* + * Randomly Toggle CPUs' callback-offload state. This uses hrtimers to + * increase race probabilities and fuzzes the interval between toggling. + */ +static int rcu_nocb_toggle(void *arg) +{ + int cpu; + int maxcpu = -1; + int oldnice = task_nice(current); + long r; + DEFINE_TORTURE_RANDOM(rand); + ktime_t toggle_delay; + unsigned long toggle_fuzz; + ktime_t toggle_interval = ms_to_ktime(nocbs_toggle); + + VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started"); + while (!rcu_inkernel_boot_has_ended()) + schedule_timeout_interruptible(HZ / 10); + for_each_online_cpu(cpu) + maxcpu = cpu; + WARN_ON(maxcpu < 0); + if (toggle_interval > ULONG_MAX) + toggle_fuzz = ULONG_MAX >> 3; + else + toggle_fuzz = toggle_interval >> 3; + if (toggle_fuzz <= 0) + toggle_fuzz = NSEC_PER_USEC; + do { + r = torture_random(&rand); + cpu = (r >> 4) % (maxcpu + 1); + if (r & 0x1) { + rcu_nocb_cpu_offload(cpu); + atomic_long_inc(&n_nocb_offload); + } else { + rcu_nocb_cpu_deoffload(cpu); + atomic_long_inc(&n_nocb_deoffload); + } + toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval; + set_current_state(TASK_INTERRUPTIBLE); + schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL); + if (stutter_wait("rcu_nocb_toggle")) + sched_set_normal(current, oldnice); + } while (!torture_must_stop()); + torture_kthread_stopping("rcu_nocb_toggle"); + return 0; +} + /* * Print torture statistics. Caller must ensure that there is only * one call to this function at a given time!!! 
This is normally @@ -1553,7 +1606,9 @@ rcu_torture_stats_print(void) data_race(n_barrier_successes), data_race(n_barrier_attempts), data_race(n_rcu_torture_barrier_error)); - pr_cont("read-exits: %ld\n", data_race(n_read_exits)); + pr_cont("read-exits: %ld ", data_race(n_read_exits)); + pr_cont("nocb-toggles: %ld:%ld\n", + atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload)); pr_alert("%s%s ", torture_type, TORTURE_FLAG); if (atomic_read(&n_rcu_torture_mberror) || @@ -1647,7 +1702,8 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) "stall_cpu_block=%d " "n_barrier_cbs=%d " "onoff_interval=%d onoff_holdoff=%d " - "read_exit_delay=%d read_exit_burst=%d\n", + "read_exit_delay=%d read_exit_burst=%d " + "nocbs_nthreads=%d nocbs_toggle=%d\n", torture_type, tag, nrealreaders, nfakewriters, stat_interval, verbose, test_no_idle_hz, shuffle_interval, stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, @@ -1657,7 +1713,8 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) stall_cpu_block, n_barrier_cbs, onoff_interval, onoff_holdoff, - read_exit_delay, read_exit_burst); + read_exit_delay, read_exit_burst, + nocbs_nthreads, nocbs_toggle); } static int rcutorture_booster_cleanup(unsigned int cpu) @@ -2500,6 +2557,13 @@ rcu_torture_cleanup(void) torture_stop_kthread(rcu_torture_stall, stall_task); torture_stop_kthread(rcu_torture_writer, writer_task); + if (nocb_tasks) { + for (i = 0; i < nrealnocbers; i++) + torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]); + kfree(nocb_tasks); + nocb_tasks = NULL; + } + if (reader_tasks) { for (i = 0; i < nrealreaders; i++) torture_stop_kthread(rcu_torture_reader, @@ -2762,6 +2826,26 @@ rcu_torture_init(void) if (firsterr) goto unwind; } + nrealnocbers = nocbs_nthreads; + if (WARN_ON(nrealnocbers < 0)) + nrealnocbers = 1; + if (WARN_ON(nocbs_toggle < 0)) + nocbs_toggle = HZ; + if (nrealnocbers > 0) { + nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL); + if (nocb_tasks == NULL) { + VERBOSE_TOROUT_ERRSTRING("out of memory"); + firsterr = -ENOMEM; + goto unwind; + } + } else { + nocb_tasks = NULL; + } + for (i = 0; i < nrealnocbers; i++) { + firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]); + if (firsterr) + goto unwind; + } if (stat_interval > 0) { firsterr = torture_create_kthread(rcu_torture_stats, NULL, stats_task); From 70e8088b97211177225acf499247b3741cc8a229 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Fri, 13 Nov 2020 13:13:29 +0100 Subject: [PATCH 078/130] tools/rcutorture: Support nocb toggle in TREE01 This commit adds periodic toggling of 7 of 8 CPUs every second to TREE01 in order to test NOCB toggle code. Cc: Josh Triplett Cc: Steven Rostedt Cc: Mathieu Desnoyers Cc: Lai Jiangshan Cc: Joel Fernandes Cc: Neeraj Upadhyay Cc: Thomas Gleixner Inspired-by: Paul E. McKenney Tested-by: Boqun Feng Signed-off-by: Frederic Weisbecker Signed-off-by: Paul E. 
McKenney --- tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot index d6da9a61d44a..40af3df0f397 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot +++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot @@ -2,5 +2,7 @@ maxcpus=8 nr_cpus=43 rcutree.gp_preinit_delay=3 rcutree.gp_init_delay=3 rcutree.gp_cleanup_delay=3 -rcu_nocbs=0 +rcu_nocbs=0-1,3-7 +rcutorture.nocbs_nthreads=8 +rcutorture.nocbs_toggle=1000 rcutorture.fwd_progress=0 From 341690611f8d488859f42a761f5d7cbac6ba2940 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 18 Dec 2020 10:20:34 -0800 Subject: [PATCH 079/130] rcu/nocb: Add grace period and task state to show_rcu_nocb_state() output This commit improves debuggability by indicating which grace period each batch of nocb callbacks is waiting on and by showing the task state and last CPU for reach nocb kthread. [ paulmck: Handle !SMP CB offloading per kernel test robot feedback. ] Signed-off-by: Paul E. McKenney --- kernel/rcu/rcu_segcblist.h | 11 +++++++++++ kernel/rcu/tree_plugin.h | 39 +++++++++++++++++++++++++++++++------- 2 files changed, 43 insertions(+), 7 deletions(-) diff --git a/kernel/rcu/rcu_segcblist.h b/kernel/rcu/rcu_segcblist.h index afad6fc6311c..311060279f1a 100644 --- a/kernel/rcu/rcu_segcblist.h +++ b/kernel/rcu/rcu_segcblist.h @@ -117,6 +117,17 @@ static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg) return !READ_ONCE(*READ_ONCE(rsclp->tails[seg])); } +/* + * Is the specified segment of the specified rcu_segcblist structure + * empty of callbacks? + */ +static inline bool rcu_segcblist_segempty(struct rcu_segcblist *rsclp, int seg) +{ + if (seg == RCU_DONE_TAIL) + return &rsclp->head == rsclp->tails[RCU_DONE_TAIL]; + return rsclp->tails[seg - 1] == rsclp->tails[seg]; +} + void rcu_segcblist_inc_len(struct rcu_segcblist *rsclp); void rcu_segcblist_add_len(struct rcu_segcblist *rsclp, long v); void rcu_segcblist_init(struct rcu_segcblist *rsclp); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 8641b72f6d0d..5ee1113fbf39 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -2672,6 +2672,19 @@ void rcu_bind_current_to_nocb(void) } EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb); +// The ->on_cpu field is available only in CONFIG_SMP=y, so... +#ifdef CONFIG_SMP +static char *show_rcu_should_be_on_cpu(struct task_struct *tsp) +{ + return tsp && tsp->state == TASK_RUNNING && !tsp->on_cpu ? "!" : ""; +} +#else // #ifdef CONFIG_SMP +static char *show_rcu_should_be_on_cpu(struct task_struct *tsp) +{ + return ""; +} +#endif // #else #ifdef CONFIG_SMP + /* * Dump out nocb grace-period kthread state for the specified rcu_data * structure. 
@@ -2680,7 +2693,7 @@ static void show_rcu_nocb_gp_state(struct rcu_data *rdp) { struct rcu_node *rnp = rdp->mynode; - pr_info("nocb GP %d %c%c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu\n", + pr_info("nocb GP %d %c%c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu %c CPU %d%s\n", rdp->cpu, "kK"[!!rdp->nocb_gp_kthread], "lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)], @@ -2694,12 +2707,17 @@ static void show_rcu_nocb_gp_state(struct rcu_data *rdp) ".B"[!!rdp->nocb_gp_bypass], ".G"[!!rdp->nocb_gp_gp], (long)rdp->nocb_gp_seq, - rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops)); + rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops), + rdp->nocb_gp_kthread ? task_state_to_char(rdp->nocb_gp_kthread) : '.', + rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1, + show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread)); } /* Dump out nocb kthread state for the specified rcu_data structure. */ static void show_rcu_nocb_state(struct rcu_data *rdp) { + char bufw[20]; + char bufr[20]; struct rcu_segcblist *rsclp = &rdp->cblist; bool waslocked; bool wastimer; @@ -2708,7 +2726,9 @@ static void show_rcu_nocb_state(struct rcu_data *rdp) if (rdp->nocb_gp_rdp == rdp) show_rcu_nocb_gp_state(rdp); - pr_info(" CB %d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%c%c%c q%ld\n", + sprintf(bufw, "%ld", rsclp->gp_seq[RCU_WAIT_TAIL]); + sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]); + pr_info(" CB %d^%d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n", rdp->cpu, rdp->nocb_gp_rdp->cpu, "kK"[!!rdp->nocb_cb_kthread], "bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)], @@ -2720,11 +2740,16 @@ static void show_rcu_nocb_state(struct rcu_data *rdp) jiffies - rdp->nocb_nobypass_last, rdp->nocb_nobypass_count, ".D"[rcu_segcblist_ready_cbs(rsclp)], - ".W"[!rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL)], - ".R"[!rcu_segcblist_restempty(rsclp, RCU_WAIT_TAIL)], - ".N"[!rcu_segcblist_restempty(rsclp, RCU_NEXT_READY_TAIL)], + ".W"[!rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL)], + rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL) ? "" : bufw, + ".R"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL)], + rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL) ? "" : bufr, + ".N"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_TAIL)], ".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)], - rcu_segcblist_n_cbs(&rdp->cblist)); + rcu_segcblist_n_cbs(&rdp->cblist), + rdp->nocb_cb_kthread ? task_state_to_char(rdp->nocb_cb_kthread) : '.', + rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1, + show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread)); /* It is OK for GP kthreads to have GP state. */ if (rdp->nocb_gp_rdp == rdp) From 3d0cef50f32e2bc69f60909584c18623bba9a6c6 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 18 Dec 2020 13:17:37 -0800 Subject: [PATCH 080/130] rcu/nocb: Add nocb CB kthread list to show_rcu_nocb_state() output This commit improves debuggability by indicating laying out the order in which rcuoc kthreads appear in the ->nocb_next_cb_rdp list. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree_plugin.h | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 5ee1113fbf39..bc63a6b9d532 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -2730,6 +2730,7 @@ static void show_rcu_nocb_state(struct rcu_data *rdp) sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]); pr_info(" CB %d^%d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n", rdp->cpu, rdp->nocb_gp_rdp->cpu, + rdp->nocb_next_cb_rdp ? 
rdp->nocb_next_cb_rdp->cpu : -1, "kK"[!!rdp->nocb_cb_kthread], "bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)], "cC"[!!atomic_read(&rdp->nocb_lock_contended)], From f759081e8f5ac640df1c7125540759bbcb4eb0e2 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 21 Dec 2020 11:17:16 -0800 Subject: [PATCH 081/130] rcu/nocb: Code-style nits in callback-offloading toggling This commit addresses a few code-style nits in callback-offloading toggling, including one that predates this toggling. Cc: Frederic Weisbecker Signed-off-by: Paul E. McKenney --- kernel/rcu/rcu_segcblist.h | 19 +++++----------- kernel/rcu/rcutorture.c | 2 +- kernel/rcu/tree_plugin.h | 45 +++++++++++++++++++------------------- kernel/time/timer.c | 1 + 4 files changed, 30 insertions(+), 37 deletions(-) diff --git a/kernel/rcu/rcu_segcblist.h b/kernel/rcu/rcu_segcblist.h index 311060279f1a..9a19328ff251 100644 --- a/kernel/rcu/rcu_segcblist.h +++ b/kernel/rcu/rcu_segcblist.h @@ -80,17 +80,12 @@ static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp) return rcu_segcblist_test_flags(rsclp, SEGCBLIST_ENABLED); } -/* Is the specified rcu_segcblist offloaded? */ +/* Is the specified rcu_segcblist offloaded, or is SEGCBLIST_SOFTIRQ_ONLY set? */ static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp) { - if (IS_ENABLED(CONFIG_RCU_NOCB_CPU)) { - /* - * Complete de-offloading happens only when SEGCBLIST_SOFTIRQ_ONLY - * is set. - */ - if (!rcu_segcblist_test_flags(rsclp, SEGCBLIST_SOFTIRQ_ONLY)) - return true; - } + if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) && + !rcu_segcblist_test_flags(rsclp, SEGCBLIST_SOFTIRQ_ONLY)) + return true; return false; } @@ -99,10 +94,8 @@ static inline bool rcu_segcblist_completely_offloaded(struct rcu_segcblist *rscl { int flags = SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP | SEGCBLIST_OFFLOADED; - if (IS_ENABLED(CONFIG_RCU_NOCB_CPU)) { - if ((rsclp->flags & flags) == flags) - return true; - } + if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) && (rsclp->flags & flags) == flags) + return true; return false; } diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 22735bc3eacc..b9dd63c166b9 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1606,7 +1606,7 @@ rcu_torture_stats_print(void) data_race(n_barrier_successes), data_race(n_barrier_attempts), data_race(n_rcu_torture_barrier_error)); - pr_cont("read-exits: %ld ", data_race(n_read_exits)); + pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic. pr_cont("nocb-toggles: %ld:%ld\n", atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload)); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index bc63a6b9d532..6f56f9e51e67 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1962,17 +1962,17 @@ static inline bool nocb_gp_update_state(struct rcu_data *rdp, bool *needwake_sta *needwake_state = true; } return true; - } else { - /* - * De-offloading. Clear our flag and notify the de-offload worker. - * We will ignore this rdp until it ever gets re-offloaded. - */ - WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)); - rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP); - if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB)) - *needwake_state = true; - return false; } + + /* + * De-offloading. Clear our flag and notify the de-offload worker. + * We will ignore this rdp until it ever gets re-offloaded. 
+ */ + WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)); + rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP); + if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB)) + *needwake_state = true; + return false; } @@ -2005,6 +2005,7 @@ static void nocb_gp_wait(struct rcu_data *my_rdp) WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp); for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) { bool needwake_state = false; + if (!nocb_gp_enabled_cb(rdp)) continue; trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check")); @@ -2160,11 +2161,11 @@ static inline bool nocb_cb_wait_cond(struct rcu_data *rdp) static void nocb_cb_wait(struct rcu_data *rdp) { struct rcu_segcblist *cblist = &rdp->cblist; - struct rcu_node *rnp = rdp->mynode; - bool needwake_state = false; - bool needwake_gp = false; unsigned long cur_gp_seq; unsigned long flags; + bool needwake_state = false; + bool needwake_gp = false; + struct rcu_node *rnp = rdp->mynode; local_irq_save(flags); rcu_momentary_dyntick_idle(); @@ -2217,8 +2218,8 @@ static void nocb_cb_wait(struct rcu_data *rdp) swait_event_interruptible_exclusive(rdp->nocb_cb_wq, nocb_cb_wait_cond(rdp)); - /* ^^^ Ensure CB invocation follows _sleep test. */ - if (smp_load_acquire(&rdp->nocb_cb_sleep)) { + // VVV Ensure CB invocation follows _sleep test. + if (smp_load_acquire(&rdp->nocb_cb_sleep)) { // ^^^ WARN_ON(signal_pending(current)); trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty")); } @@ -2323,7 +2324,7 @@ static int __rcu_nocb_rdp_deoffload(struct rcu_data *rdp) unsigned long flags; int ret; - printk("De-offloading %d\n", rdp->cpu); + pr_info("De-offloading %d\n", rdp->cpu); rcu_nocb_lock_irqsave(rdp, flags); /* @@ -2384,11 +2385,10 @@ int rcu_nocb_cpu_deoffload(int cpu) mutex_lock(&rcu_state.barrier_mutex); cpus_read_lock(); if (rcu_segcblist_is_offloaded(&rdp->cblist)) { - if (cpu_online(cpu)) { + if (cpu_online(cpu)) ret = work_on_cpu(cpu, rcu_nocb_rdp_deoffload, rdp); - } else { + else ret = __rcu_nocb_rdp_deoffload(rdp); - } if (!ret) cpumask_clear_cpu(cpu, rcu_nocb_mask); } @@ -2412,7 +2412,7 @@ static int __rcu_nocb_rdp_offload(struct rcu_data *rdp) if (!rdp->nocb_gp_rdp) return -EINVAL; - printk("Offloading %d\n", rdp->cpu); + pr_info("Offloading %d\n", rdp->cpu); /* * Can't use rcu_nocb_lock_irqsave() while we are in * SEGCBLIST_SOFTIRQ_ONLY mode. @@ -2460,11 +2460,10 @@ int rcu_nocb_cpu_offload(int cpu) mutex_lock(&rcu_state.barrier_mutex); cpus_read_lock(); if (!rcu_segcblist_is_offloaded(&rdp->cblist)) { - if (cpu_online(cpu)) { + if (cpu_online(cpu)) ret = work_on_cpu(cpu, rcu_nocb_rdp_offload, rdp); - } else { + else ret = __rcu_nocb_rdp_offload(rdp); - } if (!ret) cpumask_set_cpu(cpu, rcu_nocb_mask); } diff --git a/kernel/time/timer.c b/kernel/time/timer.c index f9b2096456e5..f475f1a027c8 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -1243,6 +1243,7 @@ bool timer_curr_running(struct timer_list *timer) for (i = 0; i < NR_BASES; i++) { struct timer_base *base = this_cpu_ptr(&timer_bases[i]); + if (base->running_timer == timer) return true; } From 147c6852d34563b87ff0e67383c2bf675e8248f6 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 22 Dec 2020 16:49:11 -0800 Subject: [PATCH 082/130] rcu: Do any deferred nocb wakeups at CPU offline time Because the need to wake a nocb GP kthread ("rcuog") is sometimes detected when wakeups cannot be done, these wakeups can be deferred. 
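As a purely illustrative user-space sketch of this defer-then-flush idea (the function names, the single global flag, and the explicit safe-point call are invented here; the kernel's real mechanism is per-CPU, timer-assisted, and protected by the nocb locks):

    /* Sketch only: remember a wakeup that cannot be done now, flush it later. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool wakeup_deferred;

    static void request_gp_kthread_wakeup(bool can_wake_now)
    {
            if (can_wake_now)
                    printf("wake rcuog now\n");
            else
                    wakeup_deferred = true;    /* record for a later safe point */
    }

    static void at_safe_point(const char *where)
    {
            if (wakeup_deferred) {             /* flush any pending request */
                    wakeup_deferred = false;
                    printf("deferred wake of rcuog from %s\n", where);
            }
    }

    int main(void)
    {
            request_gp_kthread_wakeup(false);  /* e.g., caller cannot wake directly */
            at_safe_point("cpu-offline path"); /* the role rcu_report_dead() gains here */
            return 0;
    }
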
The wakeups are then carried out by calls to do_nocb_deferred_wakeup() at various safe points in the code, including RCU's idle hooks. However, when a CPU goes offline, it invokes arch_cpu_idle_dead() without invoking any of RCU's idle hooks. This commit therefore adds a call to do_nocb_deferred_wakeup() in rcu_report_dead() in order to handle any deferred wakeups that have been requested by the outgoing CPU. Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 03810a58fdd0..e6dee714efe0 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -4178,6 +4178,9 @@ void rcu_report_dead(unsigned int cpu) struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ + // Do any dangling deferred wakeups. + do_nocb_deferred_wakeup(rdp); + /* QS for any half-done expedited grace period. */ preempt_disable(); rcu_report_exp_rdp(this_cpu_ptr(&rcu_data)); From 683954e55c981467bfd4688417e914bafc40959f Mon Sep 17 00:00:00 2001 From: Neeraj Upadhyay Date: Mon, 16 Nov 2020 21:36:00 +0530 Subject: [PATCH 083/130] rcu: Check and report missed fqs timer wakeup on RCU stall For a new grace period request, the RCU GP kthread transitions through following states: a. [RCU_GP_WAIT_GPS] -> [RCU_GP_DONE_GPS] The RCU_GP_WAIT_GPS state is where the GP kthread waits for a request for a new GP. Once it receives a request (for example, when a new RCU callback is queued), the GP kthread transitions to RCU_GP_DONE_GPS. b. [RCU_GP_DONE_GPS] -> [RCU_GP_ONOFF] Grace period initialization starts in rcu_gp_init(), which records the start of new GP in rcu_state.gp_seq and transitions to RCU_GP_ONOFF. c. [RCU_GP_ONOFF] -> [RCU_GP_INIT] The purpose of the RCU_GP_ONOFF state is to apply the online/offline information that was buffered for any CPUs that recently came online or went offline. This state is maintained in per-leaf rcu_node bitmasks, with the buffered state in ->qsmaskinitnext and the state for the upcoming GP in ->qsmaskinit. At the end of this RCU_GP_ONOFF state, each bit in ->qsmaskinit will correspond to a CPU that must pass through a quiescent state before the upcoming grace period is allowed to complete. However, a leaf rcu_node structure with an all-zeroes ->qsmaskinit cannot necessarily be ignored. In preemptible RCU, there might well be tasks still in RCU read-side critical sections that were first preempted while running on one of the CPUs managed by this structure. Such tasks will be queued on this structure's ->blkd_tasks list. Only after this list fully drains can this leaf rcu_node structure be ignored, and even then only if none of its CPUs have come back online in the meantime. Once that happens, the ->qsmaskinit masks further up the tree will be updated to exclude this leaf rcu_node structure. Once the ->qsmaskinitnext and ->qsmaskinit fields have been updated as needed, the GP kthread transitions to RCU_GP_INIT. d. [RCU_GP_INIT] -> [RCU_GP_WAIT_FQS] The purpose of the RCU_GP_INIT state is to copy each ->qsmaskinit to the ->qsmask field within each rcu_node structure. This copying is done breadth-first from the root to the leaves. Why not just copy directly from ->qsmaskinitnext to ->qsmask? Because the ->qsmaskinitnext masks can change in the meantime as additional CPUs come online or go offline. 
Such changes would result in inconsistencies in the ->qsmask fields up and down the tree, which could in turn result in too-short grace periods or grace-period hangs. These issues are avoided by snapshotting the leaf rcu_node structures' ->qsmaskinitnext fields into their ->qsmaskinit counterparts, generating a consistent set of ->qsmaskinit fields throughout the tree, and only then copying these consistent ->qsmaskinit fields to their ->qsmask counterparts. Once this initialization step is complete, the GP kthread transitions to RCU_GP_WAIT_FQS, where it waits to do a force-quiescent-state scan on the one hand or for the end of the grace period on the other. e. [RCU_GP_WAIT_FQS] -> [RCU_GP_DOING_FQS] The RCU_GP_WAIT_FQS state waits for one of three things: (1) An explicit request to do a force-quiescent-state scan, (2) The end of the grace period, or (3) A short interval of time, after which it will do a force-quiescent-state (FQS) scan. The explicit request can come from rcutorture or from any CPU that has too many RCU callbacks queued (see the qhimark kernel parameter and the RCU_GP_FLAG_OVLD flag). The aforementioned "short period of time" is specified by the jiffies_till_first_fqs boot parameter for a given grace period's first FQS scan and by the jiffies_till_next_fqs for later FQS scans. Either way, once the wait is over, the GP kthread transitions to RCU_GP_DOING_FQS. f. [RCU_GP_DOING_FQS] -> [RCU_GP_CLEANUP] The RCU_GP_DOING_FQS state performs an FQS scan. Each such scan carries out two functions for any CPU whose bit is still set in its leaf rcu_node structure's ->qsmask field, that is, for any CPU that has not yet reported a quiescent state for the current grace period: i. Report quiescent states on behalf of CPUs that have been observed to be idle (from an RCU perspective) since the beginning of the grace period. ii. If the current grace period is too old, take various actions to encourage holdout CPUs to pass through quiescent states, including enlisting the aid of any calls to cond_resched() and might_sleep(), and even including IPIing the holdout CPUs. These checks are skipped for any leaf rcu_node structure with a all-zero ->qsmask field, however such structures are subject to RCU priority boosting if there are tasks on a given structure blocking the current grace period. The end of the grace period is detected when the root rcu_node structure's ->qsmask is zero and when there are no longer any preempted tasks blocking the current grace period. (No, this last check is not redundant. To see this, consider an rcu_node tree having exactly one structure that serves as both root and leaf.) Once the end of the grace period is detected, the GP kthread transitions to RCU_GP_CLEANUP. g. [RCU_GP_CLEANUP] -> [RCU_GP_CLEANED] The RCU_GP_CLEANUP state marks the end of grace period by updating the rcu_state structure's ->gp_seq field and also all rcu_node structures' ->gp_seq field. As before, the rcu_node tree is traversed in breadth first order. Once this update is complete, the GP kthread transitions to the RCU_GP_CLEANED state. i. [RCU_GP_CLEANED] -> [RCU_GP_INIT] Once in the RCU_GP_CLEANED state, the GP kthread immediately transitions into the RCU_GP_INIT state. j. The role of timers. If there is at least one idle CPU, and if timers are not firing, the transition from RCU_GP_DOING_FQS to RCU_GP_CLEANUP will never happen. 
Timers can fail to fire for a number of reasons, including issues in timer configuration, issues in the timer framework, and failure to handle softirqs (for example, when there is a storm of interrupts). Whatever the reason, if the timers fail to fire, the GP kthread will never be awakened, resulting in RCU CPU stall warnings and eventually in OOM. However, an RCU CPU stall warning has a large number of potential causes, as documented in Documentation/RCU/stallwarn.rst. This commit therefore adds analysis to the RCU CPU stall-warning code to emit an additional message if the cause of the stall is likely to be timer failure. Signed-off-by: Neeraj Upadhyay Signed-off-by: Paul E. McKenney --- Documentation/RCU/stallwarn.rst | 23 ++++++++++++++++++++++- kernel/rcu/tree.c | 25 +++++++++++++++---------- kernel/rcu/tree_stall.h | 32 ++++++++++++++++++++++++++++++++ 3 files changed, 69 insertions(+), 11 deletions(-) diff --git a/Documentation/RCU/stallwarn.rst b/Documentation/RCU/stallwarn.rst index c9ab6af4d3be..d53856cc6390 100644 --- a/Documentation/RCU/stallwarn.rst +++ b/Documentation/RCU/stallwarn.rst @@ -92,7 +92,9 @@ warnings: buggy timer hardware through bugs in the interrupt or exception path (whether hardware, firmware, or software) through bugs in Linux's timer subsystem through bugs in the scheduler, and, - yes, even including bugs in RCU itself. + yes, even including bugs in RCU itself. It can also result in + the ``rcu_.*timer wakeup didn't happen for`` console-log message, + which will include additional debugging information. - A bug in the RCU implementation. @@ -292,6 +294,25 @@ kthread is waiting for a short timeout, the "state" precedes value of the task_struct ->state field, and the "cpu" indicates that the grace-period kthread last ran on CPU 5. +If the relevant grace-period kthread does not wake from FQS wait in a +reasonable time, then the following additional line is printed:: + + kthread timer wakeup didn't happen for 23804 jiffies! g7076 f0x0 RCU_GP_WAIT_FQS(5) ->state=0x402 + +The "23804" indicates that kthread's timer expired more than 23 thousand +jiffies ago. The rest of the line has meaning similar to the kthread +starvation case. + +Additionally, the following line is printed:: + + Possible timer handling issue on cpu=4 timer-softirq=11142 + +Here "cpu" indicates that the grace-period kthread last ran on CPU 4, +where it queued the fqs timer. The number following the "timer-softirq" +is the current ``TIMER_SOFTIRQ`` count on cpu 4. If this value does not +change on successive RCU CPU stall warnings, there is further reason to +suspect a timer problem. + Multiple Warnings From One Stall ================================ diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 40e5e3dd253e..e918f100cc34 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1765,7 +1765,7 @@ static bool rcu_gp_init(void) * go offline later. Please also refer to "Hotplug CPU" section * of RCU's Requirements documentation. */ - rcu_state.gp_state = RCU_GP_ONOFF; + WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF); rcu_for_each_leaf_node(rnp) { smp_mb(); // Pair with barriers used when updating ->ofl_seq to odd values. firstseq = READ_ONCE(rnp->ofl_seq); @@ -1831,7 +1831,7 @@ static bool rcu_gp_init(void) * The grace period cannot complete until the initialization * process finishes, because this kthread handles both. 
*/ - rcu_state.gp_state = RCU_GP_INIT; + WRITE_ONCE(rcu_state.gp_state, RCU_GP_INIT); rcu_for_each_node_breadth_first(rnp) { rcu_gp_slow(gp_init_delay); raw_spin_lock_irqsave_rcu_node(rnp, flags); @@ -1930,17 +1930,22 @@ static void rcu_gp_fqs_loop(void) ret = 0; for (;;) { if (!ret) { - rcu_state.jiffies_force_qs = jiffies + j; + WRITE_ONCE(rcu_state.jiffies_force_qs, jiffies + j); + /* + * jiffies_force_qs before RCU_GP_WAIT_FQS state + * update; required for stall checks. + */ + smp_wmb(); WRITE_ONCE(rcu_state.jiffies_kick_kthreads, jiffies + (j ? 3 * j : 2)); } trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("fqswait")); - rcu_state.gp_state = RCU_GP_WAIT_FQS; + WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS); ret = swait_event_idle_timeout_exclusive( rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j); rcu_gp_torture_wait(); - rcu_state.gp_state = RCU_GP_DOING_FQS; + WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS); /* Locking provides needed memory barriers. */ /* If grace period done, leave loop. */ if (!READ_ONCE(rnp->qsmask) && @@ -2054,7 +2059,7 @@ static void rcu_gp_cleanup(void) trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end")); rcu_seq_end(&rcu_state.gp_seq); ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq); - rcu_state.gp_state = RCU_GP_IDLE; + WRITE_ONCE(rcu_state.gp_state, RCU_GP_IDLE); /* Check for GP requests since above loop. */ rdp = this_cpu_ptr(&rcu_data); if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) { @@ -2093,12 +2098,12 @@ static int __noreturn rcu_gp_kthread(void *unused) for (;;) { trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("reqwait")); - rcu_state.gp_state = RCU_GP_WAIT_GPS; + WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_GPS); swait_event_idle_exclusive(rcu_state.gp_wq, READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_INIT); rcu_gp_torture_wait(); - rcu_state.gp_state = RCU_GP_DONE_GPS; + WRITE_ONCE(rcu_state.gp_state, RCU_GP_DONE_GPS); /* Locking provides needed memory barrier. */ if (rcu_gp_init()) break; @@ -2113,9 +2118,9 @@ static int __noreturn rcu_gp_kthread(void *unused) rcu_gp_fqs_loop(); /* Handle grace-period end. */ - rcu_state.gp_state = RCU_GP_CLEANUP; + WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANUP); rcu_gp_cleanup(); - rcu_state.gp_state = RCU_GP_CLEANED; + WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANED); } } diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h index 29cf096b5d20..353da30bd558 100644 --- a/kernel/rcu/tree_stall.h +++ b/kernel/rcu/tree_stall.h @@ -482,6 +482,36 @@ static void rcu_check_gp_kthread_starvation(void) } } +/* Complain about missing wakeups from expired fqs wait timer */ +static void rcu_check_gp_kthread_expired_fqs_timer(void) +{ + struct task_struct *gpk = rcu_state.gp_kthread; + short gp_state; + unsigned long jiffies_fqs; + int cpu; + + /* + * Order reads of .gp_state and .jiffies_force_qs. + * Matching smp_wmb() is present in rcu_gp_fqs_loop(). + */ + gp_state = smp_load_acquire(&rcu_state.gp_state); + jiffies_fqs = READ_ONCE(rcu_state.jiffies_force_qs); + + if (gp_state == RCU_GP_WAIT_FQS && + time_after(jiffies, jiffies_fqs + RCU_STALL_MIGHT_MIN) && + gpk && !READ_ONCE(gpk->on_rq)) { + cpu = task_cpu(gpk); + pr_err("%s kthread timer wakeup didn't happen for %ld jiffies! 
g%ld f%#x %s(%d) ->state=%#lx\n", + rcu_state.name, (jiffies - jiffies_fqs), + (long)rcu_seq_current(&rcu_state.gp_seq), + data_race(rcu_state.gp_flags), + gp_state_getname(RCU_GP_WAIT_FQS), RCU_GP_WAIT_FQS, + gpk->state); + pr_err("\tPossible timer handling issue on cpu=%d timer-softirq=%u\n", + cpu, kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu)); + } +} + static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps) { int cpu; @@ -543,6 +573,7 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps) WRITE_ONCE(rcu_state.jiffies_stall, jiffies + 3 * rcu_jiffies_till_stall_check() + 3); + rcu_check_gp_kthread_expired_fqs_timer(); rcu_check_gp_kthread_starvation(); panic_on_rcu_stall(); @@ -578,6 +609,7 @@ static void print_cpu_stall(unsigned long gps) jiffies - gps, (long)rcu_seq_current(&rcu_state.gp_seq), totqlen); + rcu_check_gp_kthread_expired_fqs_timer(); rcu_check_gp_kthread_starvation(); rcu_dump_cpu_stacks(); From bfc19c13d24c70e4fb1dafd76900731bcee97683 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sat, 21 Nov 2020 14:09:48 -0800 Subject: [PATCH 084/130] torture: Add torture.sh torture-everything script Although tailoring a specific set of kvm.sh runs has served rcutorture testing well over many years, it requires a relatively distraction-free environment, which is not always available. This commit therefore adds a prototype torture.sh script that by default tortures pretty much everything the rcutorture scripting is designed to torture, and which can be given command-line arguments to take a more focused approach. Signed-off-by: Paul E. McKenney --- .../selftests/rcutorture/bin/torture.sh | 301 ++++++++++++++++++ 1 file changed, 301 insertions(+) create mode 100644 tools/testing/selftests/rcutorture/bin/torture.sh diff --git a/tools/testing/selftests/rcutorture/bin/torture.sh b/tools/testing/selftests/rcutorture/bin/torture.sh new file mode 100644 index 000000000000..7f21aab2def7 --- /dev/null +++ b/tools/testing/selftests/rcutorture/bin/torture.sh @@ -0,0 +1,301 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0+ +# +# Run a series of torture tests, intended for overnight or +# longer timeframes, and also for large systems. +# +# Usage: torture.sh [ options ] +# +# Copyright (C) 2020 Facebook, Inc. +# +# Authors: Paul E. McKenney + +scriptname=$0 +args="$*" + +# Default duration and apportionment. 
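+# Each *_frac value below is the fraction of duration_base, in tenths,
+# that is devoted to the corresponding torture run; as written the three
+# fractions sum to 10.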
+duration_base=10 +duration_rcutorture_frac=7 +duration_locktorture_frac=1 +duration_scftorture_frac=2 + +# "yes" or "no" parameters +do_rcutorture=yes +do_locktorture=yes +do_scftorture=yes +do_rcuscale=yes +do_refscale=yes +do_kvfree=yes +do_kasan=yes +do_kcsan=no + +usage () { + echo "Usage: $scriptname optional arguments:" + echo " --doall" + echo " --do-kasan / --do-no-kasan" + echo " --do-kcsan / --do-no-kcsan" + echo " --do-kvfree / --do-no-kvfree" + echo " --do-locktorture / --do-no-locktorture" + echo " --do-none" + echo " --do-rcuscale / --do-no-rcuscale" + echo " --do-rcutorture / --do-no-rcutorture" + echo " --do-refscale / --do-no-refscale" + echo " --do-scftorture / --do-no-scftorture" + echo " --duration [ | h | d ]" + exit 1 +} + +while test $# -gt 0 +do + case "$1" in + --doall) + do_rcutorture=yes + do_locktorture=yes + do_scftorture=yes + do_rcuscale=yes + do_refscale=yes + do_kvfree=yes + do_kasan=yes + do_kcsan=yes + ;; + --do-kasan|--do-no-kasan) + if test "$1" = --do-kasan + then + do_kasan=yes + else + do_kasan=no + fi + ;; + --do-kcsan|--do-no-kcsan) + if test "$1" = --do-kcsan + then + do_kcsan=yes + else + do_kcsan=no + fi + ;; + --do-kvfree|--do-no-kvfree) + if test "$1" = --do-kvfree + then + do_kvfree=yes + else + do_kvfree=no + fi + ;; + --do-locktorture|--do-no-locktorture) + if test "$1" = --do-locktorture + then + do_locktorture=yes + else + do_locktorture=no + fi + ;; + --do-none) + do_rcutorture=no + do_locktorture=no + do_scftorture=no + do_rcuscale=no + do_refscale=no + do_kvfree=no + do_kasan=no + do_kcsan=no + ;; + --do-rcuscale|--do-no-rcuscale) + if test "$1" = --do-rcuscale + then + do_rcuscale=yes + else + do_rcuscale=no + fi + ;; + --do-rcutorture|--do-no-rcutorture) + if test "$1" = --do-rcutorture + then + do_rcutorture=yes + else + do_rcutorture=no + fi + ;; + --do-refscale|--do-no-refscale) + if test "$1" = --do-refscale + then + do_refscale=yes + else + do_refscale=no + fi + ;; + --do-scftorture|--do-no-scftorture) + if test "$1" = --do-scftorture + then + do_scftorture=yes + else + do_scftorture=no + fi + ;; + --duration) + # checkarg --duration "(minutes)" $# "$2" '^[0-9][0-9]*\(s\|m\|h\|d\|\)$' '^error' + mult=60 + if echo "$2" | grep -q 's$' + then + mult=1 + elif echo "$2" | grep -q 'h$' + then + mult=3600 + elif echo "$2" | grep -q 'd$' + then + mult=86400 + fi + ts=`echo $2 | sed -e 's/[smhd]$//'` + duration_base=$(($ts*mult)) + shift + ;; + *) + echo Unknown argument $1 + usage + ;; + esac + shift +done + +duration_rcutorture=$((duration_base*duration_rcutorture_frac/10)) +# Need to sum remaining weights, and if duration weights to zero, +# set do_no_rcutorture. @@@ +duration_locktorture=$((duration_base*duration_locktorture_frac/10)) +duration_scftorture=$((duration_base*duration_scftorture_frac/10)) + +T=/tmp/torture.sh.$$ +trap 'rm -rf $T' 0 2 +mkdir $T + +touch $T/failures +touch $T/successes + +ds="`date +%Y.%m.%d-%H.%M.%S`-torture" +startdate="`date`" +starttime="`awk 'BEGIN { print systime() }' < /dev/null`" + +# tortureme flavor command +# Note that "flavor" is an arbitrary string. Supply --torture if needed. +function torture_one { + echo " --- $curflavor:" Start `date` | tee -a $T/log + eval $* --datestamp "$ds/results-$curflavor" > $T/$curflavor.out 2>&1 + retcode=$? 
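+	# Locate the results directory reported by the underlying run so its
+	# console output and exit code can be archived alongside its results.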
+ resdir="`grep '^Results directory: ' $T/$curflavor.out | tail -1 | sed -e 's/^Results directory: //'`" + if test -n "$resdir" + then + cp $T/$curflavor.out $resdir/log.long + echo retcode=$retcode >> $resdir/log.long + else + cat $T/$curflavor.out | tee -a $T/log + echo retcode=$retcode | tee -a $T/log + fi + if test "$retcode" == 0 + then + echo "$curflavor($retcode)" $resdir >> $T/successes + else + echo "$curflavor($retcode)" $resdir >> $T/failures + fi +} + +function torture_set { + local flavor=$1 + shift + curflavor=$flavor + torture_one $* + if test "$do_kasan" = "yes" + then + curflavor=${flavor}-kasan + torture_one $* --kasan + fi + if test "$do_kcsan" = "yes" + then + curflavor=${flavor}-kcsan + torture_one $* --kconfig '"CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_PROVE_LOCKING=y"' --kmake-arg "CC=clang" --kcsan + fi +} + +if test "$do_rcutorture" = "yes" +then + torture_set "rcutorture" 'tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration '"$duration_rcutorture"' --configs "TREE10 4*CFLIST" --bootargs "rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000" --trust-make' +fi + +if test "$do_locktorture" = "yes" +then + torture_set "locktorture" 'tools/testing/selftests/rcutorture/bin/kvm.sh --torture lock --allcpus --duration '"$duration_locktorture"' --configs "14*CFLIST" --bootargs "torture.disable_onoff_at_boot" --trust-make' +fi + +if test "$do_scftorture" = "yes" +then + torture_set "scftorture" 'tools/testing/selftests/rcutorture/bin/kvm.sh --torture scf --allcpus --duration '"$duration_scftorture"' --kconfig "CONFIG_NR_CPUS=224" --bootargs "scftorture.nthreads=224 torture.disable_onoff_at_boot" --trust-make' +fi + +if test "$do_refscale" = yes +then + primlist="`grep '\.name[ ]*=' kernel/rcu/refscale*.c | sed -e 's/^[^"]*"//' -e 's/".*$//'`" +else + primlist= +fi +for prim in $primlist +do + torture_set "refscale-$prim" 'tools/testing/selftests/rcutorture/bin/kvm.sh --torture refscale --allcpus --duration 5 --kconfig "CONFIG_NR_CPUS=224" --bootargs "refscale.scale_type='"$prim"' refscale.nreaders=224 refscale.loops=10000 refscale.holdoff=20 torture.disable_onoff_at_boot" --trust-make' +done + +if test "$do_rcuscale" = yes +then + primlist="`grep '\.name[ ]*=' kernel/rcu/rcuscale*.c | sed -e 's/^[^"]*"//' -e 's/".*$//'`" +else + primlist= +fi +for prim in $primlist +do + torture_set "rcuscale-$prim" 'tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcuscale --allcpus --duration 5 --kconfig "CONFIG_NR_CPUS=224" --bootargs "rcuscale.scale_type='"$prim"' rcuscale.nwriters=224 rcuscale.holdoff=20 torture.disable_onoff_at_boot" --trust-make' +done + +if test "$do_kvfree" = "yes" +then + torture_set "rcuscale-kvfree" 'tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcuscale --allcpus --duration 10 --kconfig "CONFIG_NR_CPUS=224" --bootargs "rcuscale.kfree_rcu_test=1 rcuscale.kfree_nthreads=16 rcuscale.holdoff=20 rcuscale.kfree_loops=10000 torture.disable_onoff_at_boot" --trust-make' +fi + +echo " --- " $scriptname $args +echo " --- " Done `date` | tee -a $T/log +ret=0 +nsuccesses=0 +echo SUCCESSES: | tee -a $T/log +if test -s "$T/successes" +then + cat "$T/successes" | tee -a $T/log + nsuccesses="`wc -l "$T/successes" | awk '{ print $1 }'`" +fi +nfailures=0 +echo FAILURES: | tee -a $T/log +if test -s "$T/failures" +then + cat "$T/failures" | tee -a $T/log + nfailures="`wc -l "$T/failures" | awk '{ print $1 }'`" + ret=2 +fi +duration="`awk -v starttime=$starttime ' +BEGIN { + s = systime() - starttime; + 
h = s / 3600; + d = h /24; + if (d < 1) + print h " hours"; + else + print d " days (" h " hours)"; +}' < /dev/null`" +echo Started at $startdate, ended at `date`, duration $duration. | tee -a $T/log +echo Summary: Successes: $nsuccesses Failures: $nfailures. | tee -a $T/log +tdir="`cat $T/successes $T/failures | head -1 | awk '{ print $NF }' | sed -e 's,/[^/]\+/*$,,'`" +if test -n "$tdir" +then + cp $T/log $tdir +fi +exit $ret + +# RCU CPU stall warnings? +# scftorture warnings? +# Need a way for the invoker to specify clang. +# Work out --configs based on number of available CPUs? +# Need a way to specify --configs. --configs--rcutorture? +# Need to sense CPUs to size scftorture run. Ditto rcuscale and refscale. From 1adb5d6b52251105f77630432b36e340cdcb3390 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sun, 22 Nov 2020 16:49:15 -0800 Subject: [PATCH 085/130] torture: Make torture.sh use common time-duration bash functions This commit makes torture.sh use the new bash functions get_starttime() and get_starttime_duration() created for kvm.sh. Signed-off-by: Paul E. McKenney --- .../selftests/rcutorture/bin/torture.sh | 20 +++++++------------ 1 file changed, 7 insertions(+), 13 deletions(-) mode change 100644 => 100755 tools/testing/selftests/rcutorture/bin/torture.sh diff --git a/tools/testing/selftests/rcutorture/bin/torture.sh b/tools/testing/selftests/rcutorture/bin/torture.sh old mode 100644 new mode 100755 index 7f21aab2def7..16574046f7b3 --- a/tools/testing/selftests/rcutorture/bin/torture.sh +++ b/tools/testing/selftests/rcutorture/bin/torture.sh @@ -13,6 +13,10 @@ scriptname=$0 args="$*" +KVM="`pwd`/tools/testing/selftests/rcutorture"; export KVM +PATH=${KVM}/bin:$PATH; export PATH +. functions.sh + # Default duration and apportionment. duration_base=10 duration_rcutorture_frac=7 @@ -172,7 +176,7 @@ touch $T/successes ds="`date +%Y.%m.%d-%H.%M.%S`-torture" startdate="`date`" -starttime="`awk 'BEGIN { print systime() }' < /dev/null`" +starttime="`get_starttime`" # tortureme flavor command # Note that "flavor" is an arbitrary string. Supply --torture if needed. @@ -274,17 +278,7 @@ then nfailures="`wc -l "$T/failures" | awk '{ print $1 }'`" ret=2 fi -duration="`awk -v starttime=$starttime ' -BEGIN { - s = systime() - starttime; - h = s / 3600; - d = h /24; - if (d < 1) - print h " hours"; - else - print d " days (" h " hours)"; -}' < /dev/null`" -echo Started at $startdate, ended at `date`, duration $duration. | tee -a $T/log +echo Started at $startdate, ended at `date`, duration `get_starttime_duration $starttime`. | tee -a $T/log echo Summary: Successes: $nsuccesses Failures: $nfailures. | tee -a $T/log tdir="`cat $T/successes $T/failures | head -1 | awk '{ print $NF }' | sed -e 's,/[^/]\+/*$,,'`" if test -n "$tdir" @@ -293,9 +287,9 @@ then fi exit $ret +# @@@ # RCU CPU stall warnings? # scftorture warnings? # Need a way for the invoker to specify clang. # Work out --configs based on number of available CPUs? -# Need a way to specify --configs. --configs--rcutorture? # Need to sense CPUs to size scftorture run. Ditto rcuscale and refscale. From 197220d4a3347aa2c21389235db4a4457e7dc0a7 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 23 Nov 2020 07:27:32 -0800 Subject: [PATCH 086/130] torture: Remove use of "eval" in torture.sh The bash "eval" command enables Bobby Tables attacks, which might not be a concern in torture testing by themselves, but one could imagine these combined with a cut-and-paste attack. This commit therefore gets rid of them. 
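(As a minimal illustration of the hazard, using a hypothetical argument string rather than anything taken from this series: "eval" re-parses its arguments, so embedded command substitutions and metacharacters execute, whereas plain expansion leaves them as literal text.

    args='--bootargs "$(echo pwned)"'
    eval kvm.sh $args    # re-parsed: the embedded $(echo pwned) runs
    kvm.sh $args         # no eval: passed literally, but the quoting breaks

The broken quoting in the second form is the price discussed in the next paragraph.)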
This comes at a price in terms of bash quoting not working nicely, so the "--bootargs" argument lists are now passed to torture_one via a bash-variable side channel. This might be a bit ugly, but it will also allow torture.sh to grow its own --bootargs parameter. While in the area, add proper header comments for the bash functions. Signed-off-by: Paul E. McKenney --- .../selftests/rcutorture/bin/torture.sh | 55 +++++++++++++++---- 1 file changed, 44 insertions(+), 11 deletions(-) diff --git a/tools/testing/selftests/rcutorture/bin/torture.sh b/tools/testing/selftests/rcutorture/bin/torture.sh index 16574046f7b3..0bd8e84d567b 100755 --- a/tools/testing/selftests/rcutorture/bin/torture.sh +++ b/tools/testing/selftests/rcutorture/bin/torture.sh @@ -178,11 +178,26 @@ ds="`date +%Y.%m.%d-%H.%M.%S`-torture" startdate="`date`" starttime="`get_starttime`" -# tortureme flavor command +# torture_one - Does a single kvm.sh run. +# +# Usage: +# torture_bootargs="[ kernel boot arguments ]" +# torture_one flavor [ kvm.sh arguments ] +# # Note that "flavor" is an arbitrary string. Supply --torture if needed. +# Note that quoting is problematic. So on the command line, pass multiple +# values with multiple kvm.sh argument instances. function torture_one { + local cur_bootargs= + local boottag= + echo " --- $curflavor:" Start `date` | tee -a $T/log - eval $* --datestamp "$ds/results-$curflavor" > $T/$curflavor.out 2>&1 + if test -n "$torture_bootargs" + then + boottag="--bootargs" + cur_bootargs="$torture_bootargs" + fi + "$@" $boottag "$cur_bootargs" --datestamp "$ds/results-$curflavor" > $T/$curflavor.out 2>&1 retcode=$? resdir="`grep '^Results directory: ' $T/$curflavor.out | tail -1 | sed -e 's/^Results directory: //'`" if test -n "$resdir" @@ -201,36 +216,48 @@ function torture_one { fi } +# torture_set - Does a set of tortures with and without KASAN and KCSAN. +# +# Usage: +# torture_bootargs="[ kernel boot arguments ]" +# torture_set flavor [ kvm.sh arguments ] +# +# Note that "flavor" is an arbitrary string. Supply --torture if needed. +# Note that quoting is problematic. So on the command line, pass multiple +# values with multiple kvm.sh argument instances. 
function torture_set { local flavor=$1 shift curflavor=$flavor - torture_one $* + torture_one "$@" if test "$do_kasan" = "yes" then curflavor=${flavor}-kasan - torture_one $* --kasan + torture_one "$@" --kasan fi if test "$do_kcsan" = "yes" then curflavor=${flavor}-kcsan - torture_one $* --kconfig '"CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_PROVE_LOCKING=y"' --kmake-arg "CC=clang" --kcsan + torture_one $* --kconfig "CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_PROVE_LOCKING=y" --kmake-arg "CC=clang" --kcsan fi } if test "$do_rcutorture" = "yes" then - torture_set "rcutorture" 'tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration '"$duration_rcutorture"' --configs "TREE10 4*CFLIST" --bootargs "rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000" --trust-make' + torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000" + torture_set "rcutorture" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration "$duration_rcutorture" --configs "TREE10 4*CFLIST" --trust-make fi if test "$do_locktorture" = "yes" then - torture_set "locktorture" 'tools/testing/selftests/rcutorture/bin/kvm.sh --torture lock --allcpus --duration '"$duration_locktorture"' --configs "14*CFLIST" --bootargs "torture.disable_onoff_at_boot" --trust-make' + torture_bootargs="torture.disable_onoff_at_boot" + torture_set "locktorture" tools/testing/selftests/rcutorture/bin/kvm.sh --torture lock --allcpus --duration "$duration_locktorture" --configs "14*CFLIST" --trust-make fi if test "$do_scftorture" = "yes" then - torture_set "scftorture" 'tools/testing/selftests/rcutorture/bin/kvm.sh --torture scf --allcpus --duration '"$duration_scftorture"' --kconfig "CONFIG_NR_CPUS=224" --bootargs "scftorture.nthreads=224 torture.disable_onoff_at_boot" --trust-make' + torture_bootargs="scftorture.nthreads=224 torture.disable_onoff_at_boot" + torture_set "scftorture" tools/testing/selftests/rcutorture/bin/kvm.sh --torture scf --allcpus --duration "$duration_scftorture" --kconfig "CONFIG_NR_CPUS=224" --trust-make fi if test "$do_refscale" = yes @@ -241,7 +268,8 @@ else fi for prim in $primlist do - torture_set "refscale-$prim" 'tools/testing/selftests/rcutorture/bin/kvm.sh --torture refscale --allcpus --duration 5 --kconfig "CONFIG_NR_CPUS=224" --bootargs "refscale.scale_type='"$prim"' refscale.nreaders=224 refscale.loops=10000 refscale.holdoff=20 torture.disable_onoff_at_boot" --trust-make' + torture_bootargs="refscale.scale_type="$prim" refscale.nreaders=224 refscale.loops=10000 refscale.holdoff=20 torture.disable_onoff_at_boot" + torture_set "refscale-$prim" tools/testing/selftests/rcutorture/bin/kvm.sh --torture refscale --allcpus --duration 5 --kconfig "CONFIG_NR_CPUS=224" --trust-make done if test "$do_rcuscale" = yes @@ -252,12 +280,14 @@ else fi for prim in $primlist do - torture_set "rcuscale-$prim" 'tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcuscale --allcpus --duration 5 --kconfig "CONFIG_NR_CPUS=224" --bootargs "rcuscale.scale_type='"$prim"' rcuscale.nwriters=224 rcuscale.holdoff=20 torture.disable_onoff_at_boot" --trust-make' + torture_bootargs="rcuscale.scale_type="$prim" rcuscale.nwriters=224 rcuscale.holdoff=20 torture.disable_onoff_at_boot" + torture_set "rcuscale-$prim" tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcuscale --allcpus --duration 5 --kconfig "CONFIG_NR_CPUS=224" --trust-make done if test "$do_kvfree" = "yes" then - torture_set "rcuscale-kvfree" 
'tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcuscale --allcpus --duration 10 --kconfig "CONFIG_NR_CPUS=224" --bootargs "rcuscale.kfree_rcu_test=1 rcuscale.kfree_nthreads=16 rcuscale.holdoff=20 rcuscale.kfree_loops=10000 torture.disable_onoff_at_boot" --trust-make' + torture_bootargs="rcuscale.kfree_rcu_test=1 rcuscale.kfree_nthreads=16 rcuscale.holdoff=20 rcuscale.kfree_loops=10000 torture.disable_onoff_at_boot" + torture_set "rcuscale-kvfree" tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcuscale --allcpus --duration 10 --kconfig "CONFIG_NR_CPUS=224" --trust-make fi echo " --- " $scriptname $args @@ -293,3 +323,6 @@ exit $ret # Need a way for the invoker to specify clang. # Work out --configs based on number of available CPUs? # Need to sense CPUs to size scftorture run. Ditto rcuscale and refscale. +# --kconfig as with --bootargs (Both have overrides.) +# Command line parameters for --bootargs, --config, --kconfig, --kmake-arg, and --qemu-arg +# Ensure that build failures count as failures From a115a775a8d51c51c8c0b89649646a0e15a4978e Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 24 Nov 2020 11:33:05 -0800 Subject: [PATCH 087/130] torture: Add "make allmodconfig" to torture.sh This commit adds the ability to do "make allmodconfig" to torture.sh, given that normal rcutorture runs do not normally catch missing exports. Signed-off-by: Paul E. McKenney --- .../selftests/rcutorture/bin/torture.sh | 37 ++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/rcutorture/bin/torture.sh b/tools/testing/selftests/rcutorture/bin/torture.sh index 0bd8e84d567b..57f2f317eba4 100755 --- a/tools/testing/selftests/rcutorture/bin/torture.sh +++ b/tools/testing/selftests/rcutorture/bin/torture.sh @@ -17,6 +17,9 @@ KVM="`pwd`/tools/testing/selftests/rcutorture"; export KVM PATH=${KVM}/bin:$PATH; export PATH . functions.sh +TORTURE_ALLOTED_CPUS="`identify_qemu_vcpus`" +MAKE_ALLOTED_CPUS=$((TORTURE_ALLOTED_CPUS*2)) + # Default duration and apportionment. duration_base=10 duration_rcutorture_frac=7 @@ -24,6 +27,7 @@ duration_locktorture_frac=1 duration_scftorture_frac=2 # "yes" or "no" parameters +do_allmodconfig=yes do_rcutorture=yes do_locktorture=yes do_scftorture=yes @@ -36,6 +40,7 @@ do_kcsan=no usage () { echo "Usage: $scriptname optional arguments:" echo " --doall" + echo " --doallmodconfig / --do-no-allmodconfig" echo " --do-kasan / --do-no-kasan" echo " --do-kcsan / --do-no-kcsan" echo " --do-kvfree / --do-no-kvfree" @@ -53,6 +58,7 @@ while test $# -gt 0 do case "$1" in --doall) + do_allmodconfig=yes do_rcutorture=yes do_locktorture=yes do_scftorture=yes @@ -62,6 +68,14 @@ do do_kasan=yes do_kcsan=yes ;; + --do-allmodconfig|--do-no-allmodconfig) + if test "$1" = --do-allmodconfig + then + do_allmodconfig=yes + else + do_allmodconfig=no + fi + ;; --do-kasan|--do-no-kasan) if test "$1" = --do-kasan then @@ -95,6 +109,7 @@ do fi ;; --do-none) + do_allmodconfig=no do_rcutorture=no do_locktorture=no do_scftorture=no @@ -242,6 +257,26 @@ function torture_set { fi } +# make allmodconfig +if test "$do_allmodconfig" = "yes" +then + echo " --- allmodconfig:" Start `date` | tee -a $T/log + amcdir="tools/testing/selftests/rcutorture/res/$ds/allmodconfig" + mkdir -p "$amcdir" + make -j$MAKE_ALLOTED_CPUS clean > "$amcdir/Make.out" 2>&1 + make -j$MAKE_ALLOTED_CPUS allmodconfig > "$amcdir/Make.out" 2>&1 + make -j$MAKE_ALLOTED_CPUS > "$amcdir/Make.out" 2>&1 + retcode="$?" 
+ echo $retcode > "$amcdir/Make.exitcode" + if test "$retcode" == 0 + then + echo "allmodconfig($retcode)" $amcdir >> $T/successes + else + echo "allmodconfig($retcode)" $amcdir >> $T/failures + fi +fi + +# --torture rcu if test "$do_rcutorture" = "yes" then torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000" @@ -320,7 +355,7 @@ exit $ret # @@@ # RCU CPU stall warnings? # scftorture warnings? -# Need a way for the invoker to specify clang. +# Need a way for the invoker to specify clang. Maybe --kcsan-kmake or some such. # Work out --configs based on number of available CPUs? # Need to sense CPUs to size scftorture run. Ditto rcuscale and refscale. # --kconfig as with --bootargs (Both have overrides.) From 69d2b33e3f2077c57c20a3b718931746cb3a6094 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 24 Nov 2020 12:42:28 -0800 Subject: [PATCH 088/130] torture: Auto-size SCF and scaling runs based on number of CPUs This commit improves torture.sh flexibility by autoscaling the number of CPUs to be used in variable-CPUs torture tests, including scftorture, refscale, rcuscale, and kvfree. Signed-off-by: Paul E. McKenney --- .../selftests/rcutorture/bin/torture.sh | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/tools/testing/selftests/rcutorture/bin/torture.sh b/tools/testing/selftests/rcutorture/bin/torture.sh index 57f2f317eba4..e13dacf258f4 100755 --- a/tools/testing/selftests/rcutorture/bin/torture.sh +++ b/tools/testing/selftests/rcutorture/bin/torture.sh @@ -19,6 +19,11 @@ PATH=${KVM}/bin:$PATH; export PATH TORTURE_ALLOTED_CPUS="`identify_qemu_vcpus`" MAKE_ALLOTED_CPUS=$((TORTURE_ALLOTED_CPUS*2)) +HALF_ALLOTED_CPUS=$((TORTURE_ALLOTED_CPUS/2)) +if test "$HALF_ALLOTED_CPUS" -lt 1 +then + HALF_ALLOTED_CPUS=1 +fi # Default duration and apportionment. 
duration_base=10 @@ -291,8 +296,8 @@ fi if test "$do_scftorture" = "yes" then - torture_bootargs="scftorture.nthreads=224 torture.disable_onoff_at_boot" - torture_set "scftorture" tools/testing/selftests/rcutorture/bin/kvm.sh --torture scf --allcpus --duration "$duration_scftorture" --kconfig "CONFIG_NR_CPUS=224" --trust-make + torture_bootargs="scftorture.nthreads=$HALF_ALLOTED_CPUS torture.disable_onoff_at_boot" + torture_set "scftorture" tools/testing/selftests/rcutorture/bin/kvm.sh --torture scf --allcpus --duration "$duration_scftorture" --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --trust-make fi if test "$do_refscale" = yes @@ -303,8 +308,8 @@ else fi for prim in $primlist do - torture_bootargs="refscale.scale_type="$prim" refscale.nreaders=224 refscale.loops=10000 refscale.holdoff=20 torture.disable_onoff_at_boot" - torture_set "refscale-$prim" tools/testing/selftests/rcutorture/bin/kvm.sh --torture refscale --allcpus --duration 5 --kconfig "CONFIG_NR_CPUS=224" --trust-make + torture_bootargs="refscale.scale_type="$prim" refscale.nreaders=$HALF_ALLOTED_CPUS refscale.loops=10000 refscale.holdoff=20 torture.disable_onoff_at_boot" + torture_set "refscale-$prim" tools/testing/selftests/rcutorture/bin/kvm.sh --torture refscale --allcpus --duration 5 --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --trust-make done if test "$do_rcuscale" = yes @@ -315,14 +320,14 @@ else fi for prim in $primlist do - torture_bootargs="rcuscale.scale_type="$prim" rcuscale.nwriters=224 rcuscale.holdoff=20 torture.disable_onoff_at_boot" - torture_set "rcuscale-$prim" tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcuscale --allcpus --duration 5 --kconfig "CONFIG_NR_CPUS=224" --trust-make + torture_bootargs="rcuscale.scale_type="$prim" rcuscale.nwriters=$HALF_ALLOTED_CPUS rcuscale.holdoff=20 torture.disable_onoff_at_boot" + torture_set "rcuscale-$prim" tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcuscale --allcpus --duration 5 --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --trust-make done if test "$do_kvfree" = "yes" then torture_bootargs="rcuscale.kfree_rcu_test=1 rcuscale.kfree_nthreads=16 rcuscale.holdoff=20 rcuscale.kfree_loops=10000 torture.disable_onoff_at_boot" - torture_set "rcuscale-kvfree" tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcuscale --allcpus --duration 10 --kconfig "CONFIG_NR_CPUS=224" --trust-make + torture_set "rcuscale-kvfree" tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcuscale --allcpus --duration 10 --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --trust-make fi echo " --- " $scriptname $args From 532017b11950a7042d130477747cced4b7e44199 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 24 Nov 2020 16:28:01 -0800 Subject: [PATCH 089/130] torture: Enable torture.sh argument checking This commit uncomments the argument checking for the --duration argument to torture.sh. While in the area, it also corrects the duration units from seconds to minutes. Signed-off-by: Paul E. 
McKenney --- tools/testing/selftests/rcutorture/bin/torture.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/rcutorture/bin/torture.sh b/tools/testing/selftests/rcutorture/bin/torture.sh index e13dacf258f4..8e667975f9d2 100755 --- a/tools/testing/selftests/rcutorture/bin/torture.sh +++ b/tools/testing/selftests/rcutorture/bin/torture.sh @@ -157,17 +157,17 @@ do fi ;; --duration) - # checkarg --duration "(minutes)" $# "$2" '^[0-9][0-9]*\(s\|m\|h\|d\|\)$' '^error' - mult=60 - if echo "$2" | grep -q 's$' + checkarg --duration "(minutes)" $# "$2" '^[0-9][0-9]*\(m\|h\|d\|\)$' '^error' + mult=1 + if echo "$2" | grep -q 'm$' then mult=1 elif echo "$2" | grep -q 'h$' then - mult=3600 + mult=60 elif echo "$2" | grep -q 'd$' then - mult=86400 + mult=1440 fi ts=`echo $2 | sed -e 's/[smhd]$//'` duration_base=$(($ts*mult)) From 7a99487c76aad613b7533e3ea1b8d3eaf30ca37e Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 24 Nov 2020 18:57:47 -0800 Subject: [PATCH 090/130] torture: Make torture.sh rcuscale and refscale deal with allmodconfig The .mod.c files created by allmodconfig builds interfers with the approach torture.sh uses to enumerate types of rcuscale and refscale runs. This commit therefore tightens the pattern matching to avoid this interference. Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/bin/torture.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/rcutorture/bin/torture.sh b/tools/testing/selftests/rcutorture/bin/torture.sh index 8e667975f9d2..a89b521f09dc 100755 --- a/tools/testing/selftests/rcutorture/bin/torture.sh +++ b/tools/testing/selftests/rcutorture/bin/torture.sh @@ -302,7 +302,7 @@ fi if test "$do_refscale" = yes then - primlist="`grep '\.name[ ]*=' kernel/rcu/refscale*.c | sed -e 's/^[^"]*"//' -e 's/".*$//'`" + primlist="`grep '\.name[ ]*=' kernel/rcu/refscale.c | sed -e 's/^[^"]*"//' -e 's/".*$//'`" else primlist= fi @@ -314,7 +314,7 @@ done if test "$do_rcuscale" = yes then - primlist="`grep '\.name[ ]*=' kernel/rcu/rcuscale*.c | sed -e 's/^[^"]*"//' -e 's/".*$//'`" + primlist="`grep '\.name[ ]*=' kernel/rcu/rcuscale.c | sed -e 's/^[^"]*"//' -e 's/".*$//'`" else primlist= fi From 264da4832b3af4a1a4cc83df1c5fe2d43429faa6 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 24 Nov 2020 19:13:52 -0800 Subject: [PATCH 091/130] torture: Make torture.sh refscale runs use verbose_batched module parameter On large systems, the refscale printk() rate can overrun the file system's ability to accept console log messages. This commit therefore uses the new verbose_batched module parameter to rate-limit some of the higher-rate printk() calls. Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/bin/torture.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/rcutorture/bin/torture.sh b/tools/testing/selftests/rcutorture/bin/torture.sh index a89b521f09dc..a3c3c254bd26 100755 --- a/tools/testing/selftests/rcutorture/bin/torture.sh +++ b/tools/testing/selftests/rcutorture/bin/torture.sh @@ -24,6 +24,11 @@ if test "$HALF_ALLOTED_CPUS" -lt 1 then HALF_ALLOTED_CPUS=1 fi +VERBOSE_BATCH_CPUS=$((TORTURE_ALLOTED_CPUS/16)) +if test "$VERBOSE_BATCH_CPUS" -lt 2 +then + VERBOSE_BATCH_CPUS=0 +fi # Default duration and apportionment. 
duration_base=10 @@ -309,7 +314,7 @@ fi for prim in $primlist do torture_bootargs="refscale.scale_type="$prim" refscale.nreaders=$HALF_ALLOTED_CPUS refscale.loops=10000 refscale.holdoff=20 torture.disable_onoff_at_boot" - torture_set "refscale-$prim" tools/testing/selftests/rcutorture/bin/kvm.sh --torture refscale --allcpus --duration 5 --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --trust-make + torture_set "refscale-$prim" tools/testing/selftests/rcutorture/bin/kvm.sh --torture refscale --allcpus --duration 5 --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --bootargs "verbose_batched=$VERBOSE_BATCH_CPUS" --trust-make done if test "$do_rcuscale" = yes From c9a9d8e8f2e6f34e70701a1d1580eef9c76265ef Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 25 Nov 2020 10:14:24 -0800 Subject: [PATCH 092/130] torture: Create doyesno helper function for torture.sh This commit saves a few lines of code by creating a doyesno helper bash function for argument parsing. Signed-off-by: Paul E. McKenney --- .../selftests/rcutorture/bin/torture.sh | 78 +++++-------------- 1 file changed, 19 insertions(+), 59 deletions(-) diff --git a/tools/testing/selftests/rcutorture/bin/torture.sh b/tools/testing/selftests/rcutorture/bin/torture.sh index a3c3c254bd26..a01079ea7473 100755 --- a/tools/testing/selftests/rcutorture/bin/torture.sh +++ b/tools/testing/selftests/rcutorture/bin/torture.sh @@ -47,6 +47,16 @@ do_kvfree=yes do_kasan=yes do_kcsan=no +# doyesno - Helper function for yes/no arguments +function doyesno () { + if test "$1" = "$2" + then + echo yes + else + echo no + fi +} + usage () { echo "Usage: $scriptname optional arguments:" echo " --doall" @@ -79,44 +89,19 @@ do do_kcsan=yes ;; --do-allmodconfig|--do-no-allmodconfig) - if test "$1" = --do-allmodconfig - then - do_allmodconfig=yes - else - do_allmodconfig=no - fi + do_allmodconfig=`doyesno "$1" --do-allmodconfig` ;; --do-kasan|--do-no-kasan) - if test "$1" = --do-kasan - then - do_kasan=yes - else - do_kasan=no - fi + do_kasan=`doyesno "$1" --do-kasan` ;; --do-kcsan|--do-no-kcsan) - if test "$1" = --do-kcsan - then - do_kcsan=yes - else - do_kcsan=no - fi + do_kcsan=`doyesno "$1" --do-kcsan` ;; --do-kvfree|--do-no-kvfree) - if test "$1" = --do-kvfree - then - do_kvfree=yes - else - do_kvfree=no - fi + do_kvfree=`doyesno "$1" --do-kvfree` ;; --do-locktorture|--do-no-locktorture) - if test "$1" = --do-locktorture - then - do_locktorture=yes - else - do_locktorture=no - fi + do_locktorture=`doyesno "$1" --do-locktorture` ;; --do-none) do_allmodconfig=no @@ -130,36 +115,16 @@ do do_kcsan=no ;; --do-rcuscale|--do-no-rcuscale) - if test "$1" = --do-rcuscale - then - do_rcuscale=yes - else - do_rcuscale=no - fi + do_rcuscale=`doyesno "$1" --do-rcuscale` ;; --do-rcutorture|--do-no-rcutorture) - if test "$1" = --do-rcutorture - then - do_rcutorture=yes - else - do_rcutorture=no - fi + do_rcutorture=`doyesno "$1" --do-rcutorture` ;; --do-refscale|--do-no-refscale) - if test "$1" = --do-refscale - then - do_refscale=yes - else - do_refscale=no - fi + do_refscale=`doyesno "$1" --do-refscale` ;; --do-scftorture|--do-no-scftorture) - if test "$1" = --do-scftorture - then - do_scftorture=yes - else - do_scftorture=no - fi + do_scftorture=`doyesno "$1" --do-scftorture` ;; --duration) checkarg --duration "(minutes)" $# "$2" '^[0-9][0-9]*\(m\|h\|d\|\)$' '^error' @@ -363,11 +328,6 @@ fi exit $ret # @@@ -# RCU CPU stall warnings? -# scftorture warnings? # Need a way for the invoker to specify clang. Maybe --kcsan-kmake or some such. 
-# Work out --configs based on number of available CPUs? -# Need to sense CPUs to size scftorture run. Ditto rcuscale and refscale. # --kconfig as with --bootargs (Both have overrides.) # Command line parameters for --bootargs, --config, --kconfig, --kmake-arg, and --qemu-arg -# Ensure that build failures count as failures From 1fe9cef42b6cf6491a2982f68fc495c92389ba7b Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 25 Nov 2020 16:37:14 -0800 Subject: [PATCH 093/130] torture: Make torture.sh allmodconfig retain and label output This commit places "---" markers in the torture.sh script's allmodconfig output, and uses "<<" to avoid overwriting earlier output from this build test. Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/bin/torture.sh | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/rcutorture/bin/torture.sh b/tools/testing/selftests/rcutorture/bin/torture.sh index a01079ea7473..e2c97f91cac3 100755 --- a/tools/testing/selftests/rcutorture/bin/torture.sh +++ b/tools/testing/selftests/rcutorture/bin/torture.sh @@ -238,9 +238,12 @@ then echo " --- allmodconfig:" Start `date` | tee -a $T/log amcdir="tools/testing/selftests/rcutorture/res/$ds/allmodconfig" mkdir -p "$amcdir" - make -j$MAKE_ALLOTED_CPUS clean > "$amcdir/Make.out" 2>&1 - make -j$MAKE_ALLOTED_CPUS allmodconfig > "$amcdir/Make.out" 2>&1 - make -j$MAKE_ALLOTED_CPUS > "$amcdir/Make.out" 2>&1 + echo " --- make clean" > "$amcdir/Make.out" 2>&1 + make -j$MAKE_ALLOTED_CPUS clean >> "$amcdir/Make.out" 2>&1 + echo " --- make allmodconfig" >> "$amcdir/Make.out" 2>&1 + make -j$MAKE_ALLOTED_CPUS allmodconfig >> "$amcdir/Make.out" 2>&1 + echo " --- make " >> "$amcdir/Make.out" 2>&1 + make -j$MAKE_ALLOTED_CPUS >> "$amcdir/Make.out" 2>&1 retcode="$?" echo $retcode > "$amcdir/Make.exitcode" if test "$retcode" == 0 From d97addc419e2b1cc1aba2ccc679373fbff7f2521 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 25 Nov 2020 20:49:57 -0800 Subject: [PATCH 094/130] torture: Make torture.sh throttle VERBOSE_TOROUT_*() for refscale This commit causes torture.sh to use the torture.verbose_sleep_frequency kernel boot parameter to throttle verbose refscale output on large systems. Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/bin/torture.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/rcutorture/bin/torture.sh b/tools/testing/selftests/rcutorture/bin/torture.sh index e2c97f91cac3..f2f91407fa02 100755 --- a/tools/testing/selftests/rcutorture/bin/torture.sh +++ b/tools/testing/selftests/rcutorture/bin/torture.sh @@ -282,7 +282,7 @@ fi for prim in $primlist do torture_bootargs="refscale.scale_type="$prim" refscale.nreaders=$HALF_ALLOTED_CPUS refscale.loops=10000 refscale.holdoff=20 torture.disable_onoff_at_boot" - torture_set "refscale-$prim" tools/testing/selftests/rcutorture/bin/kvm.sh --torture refscale --allcpus --duration 5 --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --bootargs "verbose_batched=$VERBOSE_BATCH_CPUS" --trust-make + torture_set "refscale-$prim" tools/testing/selftests/rcutorture/bin/kvm.sh --torture refscale --allcpus --duration 5 --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --bootargs "verbose_batched=$VERBOSE_BATCH_CPUS torture.verbose_sleep_frequency=8 torture.verbose_sleep_duration=$VERBOSE_BATCH_CPUS" --trust-make done if test "$do_rcuscale" = yes From c679d90b21b76319b4a6c719442b6a1ff124b88d Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Thu, 26 Nov 2020 13:29:24 -0800 Subject: [PATCH 095/130] torture: Make torture.sh refuse to do zero-length runs This commit causes torture.sh to check for zero-length runs and to take the cowardly option of refusing to run them, logging its cowardice for later inspection. Signed-off-by: Paul E. McKenney --- .../selftests/rcutorture/bin/torture.sh | 25 ++++++++++++++----- 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/rcutorture/bin/torture.sh b/tools/testing/selftests/rcutorture/bin/torture.sh index f2f91407fa02..43ef2c0d47c1 100755 --- a/tools/testing/selftests/rcutorture/bin/torture.sh +++ b/tools/testing/selftests/rcutorture/bin/torture.sh @@ -151,16 +151,29 @@ do shift done -duration_rcutorture=$((duration_base*duration_rcutorture_frac/10)) -# Need to sum remaining weights, and if duration weights to zero, -# set do_no_rcutorture. @@@ -duration_locktorture=$((duration_base*duration_locktorture_frac/10)) -duration_scftorture=$((duration_base*duration_scftorture_frac/10)) - T=/tmp/torture.sh.$$ trap 'rm -rf $T' 0 2 mkdir $T +duration_rcutorture=$((duration_base*duration_rcutorture_frac/10)) +if test "$duration_rcutorture" -eq 0 +then + echo " --- Zero time for rcutorture, disabling" | tee -a $T/log + do_rcutorture=no +fi +duration_locktorture=$((duration_base*duration_locktorture_frac/10)) +if test "$duration_locktorture" -eq 0 +then + echo " --- Zero time for locktorture, disabling" | tee -a $T/log + do_locktorture=no +fi +duration_scftorture=$((duration_base*duration_scftorture_frac/10)) +if test "$duration_scftorture" -eq 0 +then + echo " --- Zero time for scftorture, disabling" | tee -a $T/log + do_scftorture=no +fi + touch $T/failures touch $T/successes From 5ae5f7453f93b21e06296e78e8481ba8baaaa55e Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 26 Nov 2020 21:27:27 -0800 Subject: [PATCH 096/130] torture: Drop log.long generation from torture.sh Now that kvm.sh puts all the relevant details in the "log" file, there is no need for torture.sh to generate a separate "log.long" file. This commit therefore drops this from torture.sh. Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/bin/torture.sh | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tools/testing/selftests/rcutorture/bin/torture.sh b/tools/testing/selftests/rcutorture/bin/torture.sh index 43ef2c0d47c1..cf741236665a 100755 --- a/tools/testing/selftests/rcutorture/bin/torture.sh +++ b/tools/testing/selftests/rcutorture/bin/torture.sh @@ -203,11 +203,8 @@ function torture_one { "$@" $boottag "$cur_bootargs" --datestamp "$ds/results-$curflavor" > $T/$curflavor.out 2>&1 retcode=$? resdir="`grep '^Results directory: ' $T/$curflavor.out | tail -1 | sed -e 's/^Results directory: //'`" - if test -n "$resdir" + if test -z "$resdir" then - cp $T/$curflavor.out $resdir/log.long - echo retcode=$retcode >> $resdir/log.long - else cat $T/$curflavor.out | tee -a $T/log echo retcode=$retcode | tee -a $T/log fi From 8847bd4988321cbc66c94e9dfb05b401c50378a3 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 27 Nov 2020 08:31:39 -0800 Subject: [PATCH 097/130] torture: Allow scenarios to be specified to torture.sh This commit adds --configs-rcutorture, --configs-locktorture, and --configs-scftorture arguments to torture.sh, allowing the desired set of scenarios to be passed to each. The default for each has been changed from a large-system-appropriate set to just CFLIST for each. 
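For example (an illustrative invocation only; the scenario lists shown are the ones this series previously hard-wired for large systems), the old defaults can still be requested by hand:

    tools/testing/selftests/rcutorture/bin/torture.sh --duration 12h \
        --configs-rcutorture "TREE10 4*CFLIST" \
        --configs-locktorture "14*CFLIST"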
Users are encouraged to create scripts that provide appropriate settings for their specific systems. Signed-off-by: Paul E. McKenney --- .../selftests/rcutorture/bin/torture.sh | 46 +++++++++++++++++-- 1 file changed, 43 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/rcutorture/bin/torture.sh b/tools/testing/selftests/rcutorture/bin/torture.sh index cf741236665a..f614011bd3dd 100755 --- a/tools/testing/selftests/rcutorture/bin/torture.sh +++ b/tools/testing/selftests/rcutorture/bin/torture.sh @@ -30,6 +30,11 @@ then VERBOSE_BATCH_CPUS=0 fi +# Configurations/scenarios. +configs_rcutorture= +configs_locktorture= +configs_scftorture= + # Default duration and apportionment. duration_base=10 duration_rcutorture_frac=7 @@ -59,6 +64,9 @@ function doyesno () { usage () { echo "Usage: $scriptname optional arguments:" + echo " --configs-rcutorture \"config-file list w/ repeat factor (3*TINY01)\"" + echo " --configs-locktorture \"config-file list w/ repeat factor (10*LOCK01)\"" + echo " --configs-scftorture \"config-file list w/ repeat factor (2*CFLIST)\"" echo " --doall" echo " --doallmodconfig / --do-no-allmodconfig" echo " --do-kasan / --do-no-kasan" @@ -77,6 +85,21 @@ usage () { while test $# -gt 0 do case "$1" in + --config-rcutorture|--configs-rcutorture) + checkarg --configs-rcutorture "(list of config files)" "$#" "$2" '^[^/]\+$' '^--' + configs_rcutorture="$configs_rcutorture $2" + shift + ;; + --config-locktorture|--configs-locktorture) + checkarg --configs-locktorture "(list of config files)" "$#" "$2" '^[^/]\+$' '^--' + configs_locktorture="$configs_locktorture $2" + shift + ;; + --config-scftorture|--configs-scftorture) + checkarg --configs-scftorture "(list of config files)" "$#" "$2" '^[^/]\+$' '^--' + configs_scftorture="$configs_scftorture $2" + shift + ;; --doall) do_allmodconfig=yes do_rcutorture=yes @@ -155,18 +178,35 @@ T=/tmp/torture.sh.$$ trap 'rm -rf $T' 0 2 mkdir $T +# Calculate rcutorture defaults and apportion time +if test -z "$configs_rcutorture" +then + configs_rcutorture=CFLIST +fi duration_rcutorture=$((duration_base*duration_rcutorture_frac/10)) if test "$duration_rcutorture" -eq 0 then echo " --- Zero time for rcutorture, disabling" | tee -a $T/log do_rcutorture=no fi + +# Calculate locktorture defaults and apportion time +if test -z "$configs_locktorture" +then + configs_locktorture=CFLIST +fi duration_locktorture=$((duration_base*duration_locktorture_frac/10)) if test "$duration_locktorture" -eq 0 then echo " --- Zero time for locktorture, disabling" | tee -a $T/log do_locktorture=no fi + +# Calculate scftorture defaults and apportion time +if test -z "$configs_scftorture" +then + configs_scftorture=CFLIST +fi duration_scftorture=$((duration_base*duration_scftorture_frac/10)) if test "$duration_scftorture" -eq 0 then @@ -268,19 +308,19 @@ fi if test "$do_rcutorture" = "yes" then torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000" - torture_set "rcutorture" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration "$duration_rcutorture" --configs "TREE10 4*CFLIST" --trust-make + torture_set "rcutorture" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration "$duration_rcutorture" --configs "$configs_rcutorture" --trust-make fi if test "$do_locktorture" = "yes" then torture_bootargs="torture.disable_onoff_at_boot" - torture_set "locktorture" tools/testing/selftests/rcutorture/bin/kvm.sh --torture lock --allcpus --duration "$duration_locktorture" 
--configs "14*CFLIST" --trust-make + torture_set "locktorture" tools/testing/selftests/rcutorture/bin/kvm.sh --torture lock --allcpus --duration "$duration_locktorture" --configs "$configs_locktorture" --trust-make fi if test "$do_scftorture" = "yes" then torture_bootargs="scftorture.nthreads=$HALF_ALLOTED_CPUS torture.disable_onoff_at_boot" - torture_set "scftorture" tools/testing/selftests/rcutorture/bin/kvm.sh --torture scf --allcpus --duration "$duration_scftorture" --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --trust-make + torture_set "scftorture" tools/testing/selftests/rcutorture/bin/kvm.sh --torture scf --allcpus --duration "$duration_scftorture" --configs "$configs_scftorture" --kconfig "CONFIG_NR_CPUS=$HALF_ALLOTED_CPUS" --trust-make fi if test "$do_refscale" = yes From c66c0f94b345600aea881f6c4a1dac0ff5dd1aa8 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 27 Nov 2020 09:04:22 -0800 Subject: [PATCH 098/130] torture: Add command and results directory to torture.sh log This commit adds the command and arguments to the torture.sh log file, and also outputs the results directory. This latter allows impatient users to quickly find the results that are being generated by the current run. Signed-off-by: Paul E. McKenney --- tools/testing/selftests/rcutorture/bin/torture.sh | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/rcutorture/bin/torture.sh b/tools/testing/selftests/rcutorture/bin/torture.sh index f614011bd3dd..90ca73663999 100755 --- a/tools/testing/selftests/rcutorture/bin/torture.sh +++ b/tools/testing/selftests/rcutorture/bin/torture.sh @@ -174,10 +174,17 @@ do shift done +ds="`date +%Y.%m.%d-%H.%M.%S`-torture" +startdate="`date`" +starttime="`get_starttime`" + T=/tmp/torture.sh.$$ trap 'rm -rf $T' 0 2 mkdir $T +echo " --- " $scriptname $args | tee -a $T/log +echo " --- Results directory: " $ds | tee -a $T/log + # Calculate rcutorture defaults and apportion time if test -z "$configs_rcutorture" then @@ -217,10 +224,6 @@ fi touch $T/failures touch $T/successes -ds="`date +%Y.%m.%d-%H.%M.%S`-torture" -startdate="`date`" -starttime="`get_starttime`" - # torture_one - Does a single kvm.sh run. # # Usage: From c54e413822701a18e7cf6bada2028ea9a9ecdaf9 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 27 Nov 2020 18:06:57 -0800 Subject: [PATCH 099/130] torture: Add --kcsan-kmake-arg to torture.sh for KCSAN In 2020, running KCSAN often requires careful choice of compiler. This commit therefore adds a --kcsan-kmake-arg parameter to torture.sh to allow specifying (for example) "CC=clang" to the kernel build process to correctly build a KCSAN-enabled kernel. Signed-off-by: Paul E. McKenney --- .../selftests/rcutorture/bin/torture.sh | 21 +++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/rcutorture/bin/torture.sh b/tools/testing/selftests/rcutorture/bin/torture.sh index 90ca73663999..0867f30397af 100755 --- a/tools/testing/selftests/rcutorture/bin/torture.sh +++ b/tools/testing/selftests/rcutorture/bin/torture.sh @@ -34,6 +34,7 @@ fi configs_rcutorture= configs_locktorture= configs_scftorture= +kcsan_kmake_args= # Default duration and apportionment. 
duration_base=10 @@ -79,6 +80,7 @@ usage () { echo " --do-refscale / --do-no-refscale" echo " --do-scftorture / --do-no-scftorture" echo " --duration [ | h | d ]" + echo " --kcsan-kmake-arg kernel-make-arguments" exit 1 } @@ -166,6 +168,11 @@ do duration_base=$(($ts*mult)) shift ;; + --kcsan-kmake-arg|--kcsan-kmake-args) + checkarg --kcsan-kmake-arg "(kernel make arguments)" $# "$2" '.*' '^error$' + kcsan_kmake_args="`echo "$kcsan_kmake_args $2" | sed -e 's/^ *//' -e 's/ *$//'`" + shift + ;; *) echo Unknown argument $1 usage @@ -269,6 +276,8 @@ function torture_one { # Note that quoting is problematic. So on the command line, pass multiple # values with multiple kvm.sh argument instances. function torture_set { + local cur_kcsan_kmake_args= + local kcsan_kmake_tag= local flavor=$1 shift curflavor=$flavor @@ -281,7 +290,12 @@ function torture_set { if test "$do_kcsan" = "yes" then curflavor=${flavor}-kcsan - torture_one $* --kconfig "CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_PROVE_LOCKING=y" --kmake-arg "CC=clang" --kcsan + if test -n "$kcsan_kmake_args" + then + kcsan_kmake_tag="--kmake-args" + cur_kcsan_kmake_args="$kcsan_kmake_args" + fi + torture_one $* --kconfig "CONFIG_DEBUG_LOCK_ALLOC=y CONFIG_PROVE_LOCKING=y" $kcsan_kmake_tag $cur_kcsan_kmake_args --kcsan fi } @@ -382,8 +396,3 @@ then cp $T/log $tdir fi exit $ret - -# @@@ -# Need a way for the invoker to specify clang. Maybe --kcsan-kmake or some such. -# --kconfig as with --bootargs (Both have overrides.) -# Command line parameters for --bootargs, --config, --kconfig, --kmake-arg, and --qemu-arg From e3e1a99787fcf6297990c3b6cf53f5f6ef5aed60 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 11 Dec 2020 14:03:39 -0800 Subject: [PATCH 100/130] torture: Compress KASAN vmlinux files The sizes of vmlinux files built with KASAN enabled can approach a full gigabyte, which can result in disk overflow sooner rather than later. Fortunately, the xz command compresses them by almost an order of magnitude. This commit therefore uses xz to compress vmlinux file built by torture.sh with KASAN enabled. However, xz is not the fastest thing in the world. In fact, it is way slower than rotating-rust mass storage. This commit therefore also adds a --compress-kasan-vmlinux argument to specify the degree of xz concurrency, which defaults to using all available CPUs if there are that many files in need of compression. Signed-off-by: Paul E. McKenney --- .../selftests/rcutorture/bin/torture.sh | 48 ++++++++++++++++++- 1 file changed, 46 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/rcutorture/bin/torture.sh b/tools/testing/selftests/rcutorture/bin/torture.sh index 0867f30397af..ad7525b7ac29 100755 --- a/tools/testing/selftests/rcutorture/bin/torture.sh +++ b/tools/testing/selftests/rcutorture/bin/torture.sh @@ -36,7 +36,8 @@ configs_locktorture= configs_scftorture= kcsan_kmake_args= -# Default duration and apportionment. +# Default compression, duration, and apportionment. 
+compress_kasan_vmlinux="`identify_qemu_vcpus`" duration_base=10 duration_rcutorture_frac=7 duration_locktorture_frac=1 @@ -65,6 +66,7 @@ function doyesno () { usage () { echo "Usage: $scriptname optional arguments:" + echo " --compress-kasan-vmlinux concurrency" echo " --configs-rcutorture \"config-file list w/ repeat factor (3*TINY01)\"" echo " --configs-locktorture \"config-file list w/ repeat factor (10*LOCK01)\"" echo " --configs-scftorture \"config-file list w/ repeat factor (2*CFLIST)\"" @@ -87,6 +89,11 @@ usage () { while test $# -gt 0 do case "$1" in + --compress-kasan-vmlinux) + checkarg --compress-kasan-vmlinux "(concurrency level)" $# "$2" '^[0-9][0-9]*$' '^error' + compress_kasan_vmlinux=$2 + shift + ;; --config-rcutorture|--configs-rcutorture) checkarg --configs-rcutorture "(list of config files)" "$#" "$2" '^[^/]\+$' '^--' configs_rcutorture="$configs_rcutorture $2" @@ -391,8 +398,45 @@ fi echo Started at $startdate, ended at `date`, duration `get_starttime_duration $starttime`. | tee -a $T/log echo Summary: Successes: $nsuccesses Failures: $nfailures. | tee -a $T/log tdir="`cat $T/successes $T/failures | head -1 | awk '{ print $NF }' | sed -e 's,/[^/]\+/*$,,'`" +if test -n "$tdir" && test $compress_kasan_vmlinux -gt 0 +then + # KASAN vmlinux files can approach 1GB in size, so compress them. + echo Looking for KASAN files to compress: `date` > "$tdir/log-xz" 2>&1 + find "$tdir" -type d -name '*-kasan' -print > $T/xz-todo + ncompresses=0 + batchno=1 + if test -s $T/xz-todo + then + echo Size before compressing: `du -sh $tdir | awk '{ print $1 }'` `date` 2>&1 | tee -a "$tdir/log-xz" | tee -a $T/log + for i in `cat $T/xz-todo` + do + echo Compressing vmlinux files in ${i}: `date` >> "$tdir/log-xz" 2>&1 + for j in $i/*/vmlinux + do + xz "$j" >> "$tdir/log-xz" 2>&1 & + ncompresses=$((ncompresses+1)) + if test $ncompresses -ge $compress_kasan_vmlinux + then + echo Waiting for batch $batchno of $ncompresses compressions `date` | tee -a "$tdir/log-xz" | tee -a $T/log + wait + ncompresses=0 + batchno=$((batchno+1)) + fi + done + done + if test $ncompresses -gt 0 + then + echo Waiting for final batch $batchno of $ncompresses compressions `date` | tee -a "$tdir/log-xz" | tee -a $T/log + fi + wait + echo Size after compressing: `du -sh $tdir | awk '{ print $1 }'` `date` 2>&1 | tee -a "$tdir/log-xz" | tee -a $T/log + echo Total duration `get_starttime_duration $starttime`. | tee -a $T/log + else + echo No compression needed: `date` >> "$tdir/log-xz" 2>&1 + fi +fi if test -n "$tdir" then - cp $T/log $tdir + cp $T/log "$tdir" fi exit $ret From e76506f0e85129d726c487c873a2245c92446515 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sun, 15 Nov 2020 10:24:52 -0800 Subject: [PATCH 101/130] refscale: Allow summarization of verbose output The refscale test prints enough per-kthread console output to provoke RCU CPU stall warnings on large systems. This commit therefore allows this output to be summarized. For example, the refscale.verbose_batched=32 boot parameter would causes only every 32nd line of output to be logged. Signed-off-by: Paul E. 
McKenney --- .../admin-guide/kernel-parameters.txt | 6 ++++++ kernel/rcu/refscale.c | 21 ++++++++++++++----- 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index c722ec19cd00..3244f9e04a64 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -4557,6 +4557,12 @@ refscale.verbose= [KNL] Enable additional printk() statements. + refscale.verbose_batched= [KNL] + Batch the additional printk() statements. If zero + (the default) or negative, print everything. Otherwise, + print every Nth verbose statement, where N is the value + specified. + relax_domain_level= [KNL, SMP] Set scheduler's default relax_domain_level. See Documentation/admin-guide/cgroup-v1/cpusets.rst. diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c index 23ff36a66f97..3da246f2ccf5 100644 --- a/kernel/rcu/refscale.c +++ b/kernel/rcu/refscale.c @@ -46,6 +46,16 @@ #define VERBOSE_SCALEOUT(s, x...) \ do { if (verbose) pr_alert("%s" SCALE_FLAG s, scale_type, ## x); } while (0) +static atomic_t verbose_batch_ctr; + +#define VERBOSE_SCALEOUT_BATCH(s, x...) \ +do { \ + if (verbose && \ + (verbose_batched <= 0 || \ + !(atomic_inc_return(&verbose_batch_ctr) % verbose_batched))) \ + pr_alert("%s" SCALE_FLAG s, scale_type, ## x); \ +} while (0) + #define VERBOSE_SCALEOUT_ERRSTRING(s, x...) \ do { if (verbose) pr_alert("%s" SCALE_FLAG "!!! " s, scale_type, ## x); } while (0) @@ -57,6 +67,7 @@ module_param(scale_type, charp, 0444); MODULE_PARM_DESC(scale_type, "Type of test (rcu, srcu, refcnt, rwsem, rwlock."); torture_param(int, verbose, 0, "Enable verbose debugging printk()s"); +torture_param(int, verbose_batched, 0, "Batch verbose debugging printk()s"); // Wait until there are multiple CPUs before starting test. torture_param(int, holdoff, IS_BUILTIN(CONFIG_RCU_REF_SCALE_TEST) ? 10 : 0, @@ -368,14 +379,14 @@ ref_scale_reader(void *arg) u64 start; s64 duration; - VERBOSE_SCALEOUT("ref_scale_reader %ld: task started", me); + VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: task started", me); set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids)); set_user_nice(current, MAX_NICE); atomic_inc(&n_init); if (holdoff) schedule_timeout_interruptible(holdoff * HZ); repeat: - VERBOSE_SCALEOUT("ref_scale_reader %ld: waiting to start next experiment on cpu %d", me, smp_processor_id()); + VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: waiting to start next experiment on cpu %d", me, smp_processor_id()); // Wait for signal that this reader can start. wait_event(rt->wq, (atomic_read(&nreaders_exp) && smp_load_acquire(&rt->start_reader)) || @@ -392,7 +403,7 @@ repeat: while (atomic_read_acquire(&n_started)) cpu_relax(); - VERBOSE_SCALEOUT("ref_scale_reader %ld: experiment %d started", me, exp_idx); + VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: experiment %d started", me, exp_idx); // To reduce noise, do an initial cache-warming invocation, check @@ -421,8 +432,8 @@ repeat: if (atomic_dec_and_test(&nreaders_exp)) wake_up(&main_wq); - VERBOSE_SCALEOUT("ref_scale_reader %ld: experiment %d ended, (readers remaining=%d)", - me, exp_idx, atomic_read(&nreaders_exp)); + VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: experiment %d ended, (readers remaining=%d)", + me, exp_idx, atomic_read(&nreaders_exp)); if (!torture_must_stop()) goto repeat; From 12a910e3cd3d11e00b2a2df24ea995ffa3e27ae5 Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Mon, 16 Nov 2020 16:01:50 -0800 Subject: [PATCH 102/130] rcutorture: Require entire stutter period be post-boot Currently, the rcu_torture_writer() function checks that all required grace periods elapse during a stutter interval, which is a multi-second time period during which the test load is removed. However, this check is suppressed during early boot (that is, before init is spawned) in order to avoid false positives that otherwise occur due to heavy load on the single boot CPU. Unfortunately, this approach is insufficient. It is possible that the stutter interval might end just as init is spawned, so that early boot conditions prevailed during almost the entire stutter interval. This commit therefore takes a snapshot of boot-complete state just before the stutter interval, thus suppressing the check for failure to complete grace periods unless the entire stutter interval took place after early boot. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 338e1182b4b5..1930d92f4d15 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1070,6 +1070,7 @@ rcu_torture_fqs(void *arg) static int rcu_torture_writer(void *arg) { + bool boot_ended; bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal(); unsigned long cookie; int expediting = 0; @@ -1239,12 +1240,13 @@ rcu_torture_writer(void *arg) !rcu_gp_is_normal(); } rcu_torture_writer_state = RTWS_STUTTER; + boot_ended = rcu_inkernel_boot_has_ended(); stutter_waited = stutter_wait("rcu_torture_writer"); if (stutter_waited && !READ_ONCE(rcu_fwd_cb_nodelay) && !cur_ops->slow_gps && !torture_must_stop() && - rcu_inkernel_boot_has_ended()) + boot_ended) for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) if (list_empty(&rcu_tortures[i].rtort_free) && rcu_access_pointer(rcu_torture_current) != From 18fbf307b7319af3725c36e16af6ae9f35a8699c Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 16 Nov 2020 16:46:06 -0800 Subject: [PATCH 103/130] rcutorture: Make synctype[] and nsynctype be static global Full testing of the new SRCU polling API requires that the fake writers also use it in order to test concurrent calls to all of the API members, especially start_poll_synchronize_srcu(). This commit prepares the ground for this by making the synctype[] and nsynctype variables be static globals so that the rcu_torture_fakewriter() function can access them. Initialization of these variables is moved from rcu_torture_writer() to a new rcu_torture_write_types() function that is invoked from rcu_torture_init() just before the first writer kthread is spawned. Link: https://lore.kernel.org/rcu/20201112201547.GF3365678@moria.home.lan/ Reported-by: Kent Overstreet Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 62 ++++++++++++++++++++++++----------------- 1 file changed, 36 insertions(+), 26 deletions(-) diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 1930d92f4d15..0d257e2dc9c6 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1062,37 +1062,18 @@ rcu_torture_fqs(void *arg) return 0; } +// Used by writers to randomly choose from the available grace-period +// primitives. The only purpose of the initialization is to size the array. +static int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC, RTWS_COND_GET, RTWS_POLL_GET, RTWS_SYNC }; +static int nsynctypes; + /* - * RCU torture writer kthread. 
Repeatedly substitutes a new structure - * for that pointed to by rcu_torture_current, freeing the old structure - * after a series of grace periods (the "pipeline"). + * Determine which grace-period primitives are available. */ -static int -rcu_torture_writer(void *arg) +static void rcu_torture_write_types(void) { - bool boot_ended; - bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal(); - unsigned long cookie; - int expediting = 0; - unsigned long gp_snap; bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal; bool gp_poll1 = gp_poll, gp_sync1 = gp_sync; - int i; - int idx; - int oldnice = task_nice(current); - struct rcu_torture *rp; - struct rcu_torture *old_rp; - static DEFINE_TORTURE_RANDOM(rand); - bool stutter_waited; - int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC, - RTWS_COND_GET, RTWS_POLL_GET, RTWS_SYNC }; - int nsynctypes = 0; - - VERBOSE_TOROUT_STRING("rcu_torture_writer task started"); - if (!can_expedite) - pr_alert("%s" TORTURE_FLAG - " GP expediting controlled from boot/sysfs for %s.\n", - torture_type, cur_ops->name); /* Initialize synctype[] array. If none set, take default. */ if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_poll1 && !gp_sync1) @@ -1127,6 +1108,34 @@ rcu_torture_writer(void *arg) } else if (gp_sync && !cur_ops->sync) { pr_alert("%s: gp_sync without primitives.\n", __func__); } +} + +/* + * RCU torture writer kthread. Repeatedly substitutes a new structure + * for that pointed to by rcu_torture_current, freeing the old structure + * after a series of grace periods (the "pipeline"). + */ +static int +rcu_torture_writer(void *arg) +{ + bool boot_ended; + bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal(); + unsigned long cookie; + int expediting = 0; + unsigned long gp_snap; + int i; + int idx; + int oldnice = task_nice(current); + struct rcu_torture *rp; + struct rcu_torture *old_rp; + static DEFINE_TORTURE_RANDOM(rand); + bool stutter_waited; + + VERBOSE_TOROUT_STRING("rcu_torture_writer task started"); + if (!can_expedite) + pr_alert("%s" TORTURE_FLAG + " GP expediting controlled from boot/sysfs for %s.\n", + torture_type, cur_ops->name); if (WARN_ONCE(nsynctypes == 0, "rcu_torture_writer: No update-side primitives.\n")) { /* @@ -2889,6 +2898,7 @@ rcu_torture_init(void) /* Start up the kthreads. */ + rcu_torture_write_types(); firsterr = torture_create_kthread(rcu_torture_writer, NULL, writer_task); if (firsterr) From 682189a3f874db57b3e755512f2a2953f61fc54e Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 16 Nov 2020 17:10:39 -0800 Subject: [PATCH 104/130] rcutorture: Make rcu_torture_fakewriter() use blocking wait primitives Full testing of the new SRCU polling API requires that the fake writers also use it in order to test concurrent calls to all of the API members, especially start_poll_synchronize_srcu(). This commit makes rcu_torture_fakewriter() use all available blocking grace-period-wait primitives available from the RCU flavor under test. Link: https://lore.kernel.org/rcu/20201112201547.GF3365678@moria.home.lan/ Reported-by: Kent Overstreet Signed-off-by: Paul E. 
McKenney --- kernel/rcu/rcutorture.c | 40 ++++++++++++++++++++++++++++++++-------- 1 file changed, 32 insertions(+), 8 deletions(-) diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 0d257e2dc9c6..03bdf6752fe4 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1289,6 +1289,8 @@ rcu_torture_writer(void *arg) static int rcu_torture_fakewriter(void *arg) { + unsigned long gp_snap; + int i; DEFINE_TORTURE_RANDOM(rand); VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started"); @@ -1300,15 +1302,37 @@ rcu_torture_fakewriter(void *arg) if (cur_ops->cb_barrier != NULL && torture_random(&rand) % (nfakewriters * 8) == 0) { cur_ops->cb_barrier(); - } else if (gp_normal == gp_exp) { - if (cur_ops->sync && torture_random(&rand) & 0x80) - cur_ops->sync(); - else if (cur_ops->exp_sync) + } else { + switch (synctype[torture_random(&rand) % nsynctypes]) { + case RTWS_DEF_FREE: + break; + case RTWS_EXP_SYNC: cur_ops->exp_sync(); - } else if (gp_normal && cur_ops->sync) { - cur_ops->sync(); - } else if (cur_ops->exp_sync) { - cur_ops->exp_sync(); + break; + case RTWS_COND_GET: + gp_snap = cur_ops->get_gp_state(); + i = torture_random(&rand) % 16; + if (i != 0) + schedule_timeout_interruptible(i); + udelay(torture_random(&rand) % 1000); + cur_ops->cond_sync(gp_snap); + break; + case RTWS_POLL_GET: + gp_snap = cur_ops->start_gp_poll(); + while (!cur_ops->poll_gp_state(gp_snap)) { + i = torture_random(&rand) % 16; + if (i != 0) + schedule_timeout_interruptible(i); + udelay(torture_random(&rand) % 1000); + } + break; + case RTWS_SYNC: + cur_ops->sync(); + break; + default: + WARN_ON_ONCE(1); + break; + } } stutter_wait("rcu_torture_fakewriter"); } while (!torture_must_stop()); From ae19aaafae95a5487469433e9cae4c208f8d15cd Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 17 Nov 2020 11:30:18 -0800 Subject: [PATCH 105/130] torture: Add fuzzed hrtimer-based sleep functions This commit adds torture_hrtimeout_ns(), torture_hrtimeout_us(), torture_hrtimeout_ms(), torture_hrtimeout_jiffies(), and torture_hrtimeout_s(), each of which uses hrtimers to block for a fuzzed time interval. These functions are intended to be used by the various torture tests to decouple wakeups from the timer wheel, thus providing more opportunity for Murphy to insert destructive race conditions. Signed-off-by: Paul E. McKenney --- include/linux/torture.h | 7 ++++ kernel/torture.c | 75 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 82 insertions(+) diff --git a/include/linux/torture.h b/include/linux/torture.h index 7f65bd1dd307..32941f8106b2 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -61,6 +61,13 @@ static inline void torture_random_init(struct torture_random_state *trsp) trsp->trs_count = 0; } +/* Definitions for high-resolution-timer sleeps. */ +int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, struct torture_random_state *trsp); +int torture_hrtimeout_us(u32 baset_us, u32 fuzzt_ns, struct torture_random_state *trsp); +int torture_hrtimeout_ms(u32 baset_ms, u32 fuzzt_us, struct torture_random_state *trsp); +int torture_hrtimeout_jiffies(u32 baset_j, struct torture_random_state *trsp); +int torture_hrtimeout_s(u32 baset_s, u32 fuzzt_ms, struct torture_random_state *trsp); + /* Task shuffler, which causes CPUs to occasionally go idle. 
*/ void torture_shuffle_task_register(struct task_struct *tp); int torture_shuffle_init(long shuffint); diff --git a/kernel/torture.c b/kernel/torture.c index 8562ac18d2eb..7548634e8a89 100644 --- a/kernel/torture.c +++ b/kernel/torture.c @@ -58,6 +58,81 @@ static int verbose; static int fullstop = FULLSTOP_RMMOD; static DEFINE_MUTEX(fullstop_mutex); +/* + * Schedule a high-resolution-timer sleep in nanoseconds, with a 32-bit + * nanosecond random fuzz. This function and its friends desynchronize + * testing from the timer wheel. + */ +int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, struct torture_random_state *trsp) +{ + ktime_t hto = baset_ns; + + if (trsp) + hto += (torture_random(trsp) >> 3) % fuzzt_ns; + set_current_state(TASK_UNINTERRUPTIBLE); + return schedule_hrtimeout(&hto, HRTIMER_MODE_REL); +} +EXPORT_SYMBOL_GPL(torture_hrtimeout_ns); + +/* + * Schedule a high-resolution-timer sleep in microseconds, with a 32-bit + * nanosecond (not microsecond!) random fuzz. + */ +int torture_hrtimeout_us(u32 baset_us, u32 fuzzt_ns, struct torture_random_state *trsp) +{ + ktime_t baset_ns = baset_us * NSEC_PER_USEC; + + return torture_hrtimeout_ns(baset_ns, fuzzt_ns, trsp); +} +EXPORT_SYMBOL_GPL(torture_hrtimeout_us); + +/* + * Schedule a high-resolution-timer sleep in milliseconds, with a 32-bit + * microsecond (not millisecond!) random fuzz. + */ +int torture_hrtimeout_ms(u32 baset_ms, u32 fuzzt_us, struct torture_random_state *trsp) +{ + ktime_t baset_ns = baset_ms * NSEC_PER_MSEC; + u32 fuzzt_ns; + + if ((u32)~0U / NSEC_PER_USEC < fuzzt_us) + fuzzt_ns = (u32)~0U; + else + fuzzt_ns = fuzzt_us * NSEC_PER_USEC; + return torture_hrtimeout_ns(baset_ns, fuzzt_ns, trsp); +} +EXPORT_SYMBOL_GPL(torture_hrtimeout_ms); + +/* + * Schedule a high-resolution-timer sleep in jiffies, with an + * implied one-jiffy random fuzz. This is intended to replace calls to + * schedule_timeout_interruptible() and friends. + */ +int torture_hrtimeout_jiffies(u32 baset_j, struct torture_random_state *trsp) +{ + ktime_t baset_ns = jiffies_to_nsecs(baset_j); + + return torture_hrtimeout_ns(baset_ns, jiffies_to_nsecs(1), trsp); +} +EXPORT_SYMBOL_GPL(torture_hrtimeout_jiffies); + +/* + * Schedule a high-resolution-timer sleep in milliseconds, with a 32-bit + * millisecond (not second!) random fuzz. + */ +int torture_hrtimeout_s(u32 baset_s, u32 fuzzt_ms, struct torture_random_state *trsp) +{ + ktime_t baset_ns = baset_s * NSEC_PER_SEC; + u32 fuzzt_ns; + + if ((u32)~0U / NSEC_PER_MSEC < fuzzt_ms) + fuzzt_ns = (u32)~0U; + else + fuzzt_ns = fuzzt_ms * NSEC_PER_MSEC; + return torture_hrtimeout_ns(baset_ns, fuzzt_ns, trsp); +} +EXPORT_SYMBOL_GPL(torture_hrtimeout_s); + #ifdef CONFIG_HOTPLUG_CPU /* From ea31fd9ca87399ac4e03cd6c215451fa7dc366e4 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 17 Nov 2020 11:32:54 -0800 Subject: [PATCH 106/130] rcutorture: Use torture_hrtimeout_jiffies() to avoid busy-waits Because rcu_torture_writer() and rcu_torture_fakewriter() predate hrtimers, they do timer-wheel-decoupled timed waits by using the timer-wheel-based schedule_timeout_interruptible() functions in conjunction with a random udelay()-based wait. This latter unnecessarily burns CPU time, so this commit instead uses torture_hrtimeout_jiffies() to decouple from the timer wheels without busy-waiting. Signed-off-by: Paul E. 
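To make the intent of these helpers concrete, here is a small hedged before/after sketch, assuming the torture_hrtimeout_*() functions added above; the wrapper function itself is hypothetical:

/* Hypothetical conversion from a timer-wheel wait plus busy-wait udelay()
 * to a single fuzzed hrtimer sleep, as the following patches perform. */
static void example_fuzzed_wait(struct torture_random_state *trsp)
{
        /* Old pattern, coupled to the timer wheel and burning CPU:
         *      schedule_timeout_interruptible(torture_random(trsp) % 16);
         *      udelay(torture_random(trsp) % 1000);
         */

        /* New pattern: up to 16 jiffies with an implied one-jiffy fuzz. */
        torture_hrtimeout_jiffies(torture_random(trsp) % 16, trsp);
}

Note the unit convention: each helper takes its fuzz argument in the next-smaller unit, so for example torture_hrtimeout_us(500, 1000, &rand) sleeps for roughly 500 microseconds with up to 1000 nanoseconds of fuzz.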
McKenney --- kernel/rcu/rcutorture.c | 26 +++++++------------------- 1 file changed, 7 insertions(+), 19 deletions(-) diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 03bdf6752fe4..9414e3027a04 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1194,10 +1194,7 @@ rcu_torture_writer(void *arg) case RTWS_COND_GET: rcu_torture_writer_state = RTWS_COND_GET; gp_snap = cur_ops->get_gp_state(); - i = torture_random(&rand) % 16; - if (i != 0) - schedule_timeout_interruptible(i); - udelay(torture_random(&rand) % 1000); + torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); rcu_torture_writer_state = RTWS_COND_SYNC; cur_ops->cond_sync(gp_snap); rcu_torture_pipe_update(old_rp); @@ -1206,12 +1203,9 @@ rcu_torture_writer(void *arg) rcu_torture_writer_state = RTWS_POLL_GET; gp_snap = cur_ops->start_gp_poll(); rcu_torture_writer_state = RTWS_POLL_WAIT; - while (!cur_ops->poll_gp_state(gp_snap)) { - i = torture_random(&rand) % 16; - if (i != 0) - schedule_timeout_interruptible(i); - udelay(torture_random(&rand) % 1000); - } + while (!cur_ops->poll_gp_state(gp_snap)) + torture_hrtimeout_jiffies(torture_random(&rand) % 16, + &rand); rcu_torture_pipe_update(old_rp); break; case RTWS_SYNC: @@ -1290,7 +1284,6 @@ static int rcu_torture_fakewriter(void *arg) { unsigned long gp_snap; - int i; DEFINE_TORTURE_RANDOM(rand); VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started"); @@ -1311,19 +1304,14 @@ rcu_torture_fakewriter(void *arg) break; case RTWS_COND_GET: gp_snap = cur_ops->get_gp_state(); - i = torture_random(&rand) % 16; - if (i != 0) - schedule_timeout_interruptible(i); - udelay(torture_random(&rand) % 1000); + torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); cur_ops->cond_sync(gp_snap); break; case RTWS_POLL_GET: gp_snap = cur_ops->start_gp_poll(); while (!cur_ops->poll_gp_state(gp_snap)) { - i = torture_random(&rand) % 16; - if (i != 0) - schedule_timeout_interruptible(i); - udelay(torture_random(&rand) % 1000); + torture_hrtimeout_jiffies(torture_random(&rand) % 16, + &rand); } break; case RTWS_SYNC: From ed24affa71f7abf7d81698a99b6c2623491a35b0 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 17 Nov 2020 12:17:42 -0800 Subject: [PATCH 107/130] torture: Make stutter use torture_hrtimeout_*() functions This commit saves a few lines of code by making the stutter_wait() and torture_stutter() functions use torture_hrtimeout_jiffies() and torture_hrtimeout_us(). Signed-off-by: Paul E. 
McKenney --- kernel/torture.c | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/kernel/torture.c b/kernel/torture.c index 7548634e8a89..7ad5c9681580 100644 --- a/kernel/torture.c +++ b/kernel/torture.c @@ -677,7 +677,6 @@ static int stutter_gap; */ bool stutter_wait(const char *title) { - ktime_t delay; unsigned int i = 0; bool ret = false; int spt; @@ -693,11 +692,8 @@ bool stutter_wait(const char *title) schedule_timeout_interruptible(1); } else if (spt == 2) { while (READ_ONCE(stutter_pause_test)) { - if (!(i++ & 0xffff)) { - set_current_state(TASK_INTERRUPTIBLE); - delay = 10 * NSEC_PER_USEC; - schedule_hrtimeout(&delay, HRTIMER_MODE_REL); - } + if (!(i++ & 0xffff)) + torture_hrtimeout_us(10, 0, NULL); cond_resched(); } } else { @@ -715,7 +711,6 @@ EXPORT_SYMBOL_GPL(stutter_wait); */ static int torture_stutter(void *arg) { - ktime_t delay; DEFINE_TORTURE_RANDOM(rand); int wtime; @@ -726,20 +721,15 @@ static int torture_stutter(void *arg) if (stutter > 2) { WRITE_ONCE(stutter_pause_test, 1); wtime = stutter - 3; - delay = ktime_divns(NSEC_PER_SEC * wtime, HZ); - delay += (torture_random(&rand) >> 3) % NSEC_PER_MSEC; - set_current_state(TASK_INTERRUPTIBLE); - schedule_hrtimeout(&delay, HRTIMER_MODE_REL); + torture_hrtimeout_jiffies(wtime, &rand); wtime = 2; } WRITE_ONCE(stutter_pause_test, 2); - delay = ktime_divns(NSEC_PER_SEC * wtime, HZ); - set_current_state(TASK_INTERRUPTIBLE); - schedule_hrtimeout(&delay, HRTIMER_MODE_REL); + torture_hrtimeout_jiffies(wtime, NULL); } WRITE_ONCE(stutter_pause_test, 0); if (!torture_must_stop()) - schedule_timeout_interruptible(stutter_gap); + torture_hrtimeout_jiffies(stutter_gap, NULL); torture_shutdown_absorb("torture_stutter"); } while (!torture_must_stop()); torture_kthread_stopping("torture_stutter"); From 1eba0ef981fd3b5d5e94243aeced8884f43aef50 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 17 Nov 2020 14:12:24 -0800 Subject: [PATCH 108/130] rcutorture: Use hrtimers for reader and writer delays This commit replaces schedule_timeout_uninterruptible() and schedule_timeout_interruptible() with torture_hrtimeout_us() and torture_hrtimeout_jiffies() to avoid timer-wheel synchronization. Signed-off-by: Paul E. 
McKenney --- kernel/rcu/rcutorture.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 9414e3027a04..007595d4783f 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1149,7 +1149,7 @@ rcu_torture_writer(void *arg) do { rcu_torture_writer_state = RTWS_FIXED_DELAY; - schedule_timeout_uninterruptible(1); + torture_hrtimeout_us(500, 1000, &rand); rp = rcu_torture_alloc(); if (rp == NULL) continue; @@ -1290,8 +1290,7 @@ rcu_torture_fakewriter(void *arg) set_user_nice(current, MAX_NICE); do { - schedule_timeout_uninterruptible(1 + torture_random(&rand)%10); - udelay(torture_random(&rand) & 0x3ff); + torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand); if (cur_ops->cb_barrier != NULL && torture_random(&rand) % (nfakewriters * 8) == 0) { cur_ops->cb_barrier(); @@ -1656,7 +1655,7 @@ rcu_torture_reader(void *arg) if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop()) schedule_timeout_interruptible(HZ); if (time_after(jiffies, lastsleep) && !torture_must_stop()) { - schedule_timeout_interruptible(1); + torture_hrtimeout_us(500, 1000, &rand); lastsleep = jiffies + 10; } while (num_online_cpus() < mynumonline && !torture_must_stop()) From 414c116e016584137118067f506125f6ace6128c Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 25 Nov 2020 10:50:35 -0800 Subject: [PATCH 109/130] torture: Make refscale throttle high-rate printk()s This commit adds a short delay for verbose_batched-throttled printk()s to further decrease console flooding. Signed-off-by: Paul E. McKenney --- kernel/rcu/refscale.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kernel/rcu/refscale.c b/kernel/rcu/refscale.c index 3da246f2ccf5..02dd9767b559 100644 --- a/kernel/rcu/refscale.c +++ b/kernel/rcu/refscale.c @@ -52,8 +52,10 @@ static atomic_t verbose_batch_ctr; do { \ if (verbose && \ (verbose_batched <= 0 || \ - !(atomic_inc_return(&verbose_batch_ctr) % verbose_batched))) \ + !(atomic_inc_return(&verbose_batch_ctr) % verbose_batched))) { \ + schedule_timeout_uninterruptible(1); \ pr_alert("%s" SCALE_FLAG s, scale_type, ## x); \ + } \ } while (0) #define VERBOSE_SCALEOUT_ERRSTRING(s, x...) \ From 8a67a20bf257ca378d6e5588fbe4382966395ac8 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 25 Nov 2020 13:00:04 -0800 Subject: [PATCH 110/130] torture: Throttle VERBOSE_TOROUT_*() output This commit adds kernel boot parameters torture.verbose_sleep_frequency and torture.verbose_sleep_duration, which allow VERBOSE_TOROUT_*() output to be throttled with periodic sleeps on large systems. Signed-off-by: Paul E. McKenney --- .../admin-guide/kernel-parameters.txt | 8 ++++++++ include/linux/torture.h | 15 ++++++++++++-- kernel/torture.c | 20 +++++++++++++++++++ 3 files changed, 41 insertions(+), 2 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 3244f9e04a64..18f3aa759e54 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -5337,6 +5337,14 @@ are running concurrently, especially on systems with rotating-rust storage. + torture.verbose_sleep_frequency= [KNL] + Specifies how many verbose printk()s should be + emitted between each sleep. The default of zero + disables verbose-printk() sleeping. + + torture.verbose_sleep_duration= [KNL] + Duration of each verbose-printk() sleep in jiffies. 
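For example, on a large system one might throttle the verbose output by appending something like the following (the values are hypothetical) to the kernel command line, which sleeps for two jiffies after every eighth verbose printk():

        torture.verbose_sleep_frequency=8 torture.verbose_sleep_duration=2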
+ tp720= [HW,PS2] tpm_suspend_pcr=[HW,TPM] diff --git a/include/linux/torture.h b/include/linux/torture.h index 32941f8106b2..d62d13c8c69a 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -32,9 +32,20 @@ #define TOROUT_STRING(s) \ pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s) #define VERBOSE_TOROUT_STRING(s) \ - do { if (verbose) pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s); } while (0) +do { \ + if (verbose) { \ + verbose_torout_sleep(); \ + pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s); \ + } \ +} while (0) #define VERBOSE_TOROUT_ERRSTRING(s) \ - do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); } while (0) +do { \ + if (verbose) { \ + verbose_torout_sleep(); \ + pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); \ + } \ +} while (0) +void verbose_torout_sleep(void); /* Definitions for online/offline exerciser. */ typedef void torture_ofl_func(void); diff --git a/kernel/torture.c b/kernel/torture.c index 7ad5c9681580..93eeeb2b3d88 100644 --- a/kernel/torture.c +++ b/kernel/torture.c @@ -48,6 +48,12 @@ module_param(disable_onoff_at_boot, bool, 0444); static bool ftrace_dump_at_shutdown; module_param(ftrace_dump_at_shutdown, bool, 0444); +static int verbose_sleep_frequency; +module_param(verbose_sleep_frequency, int, 0444); + +static int verbose_sleep_duration = 1; +module_param(verbose_sleep_duration, int, 0444); + static char *torture_type; static int verbose; @@ -58,6 +64,20 @@ static int verbose; static int fullstop = FULLSTOP_RMMOD; static DEFINE_MUTEX(fullstop_mutex); +static atomic_t verbose_sleep_counter; + +/* + * Sleep if needed from VERBOSE_TOROUT*(). + */ +void verbose_torout_sleep(void) +{ + if (verbose_sleep_frequency > 0 && + verbose_sleep_duration > 0 && + !(atomic_inc_return(&verbose_sleep_counter) % verbose_sleep_frequency)) + schedule_timeout_uninterruptible(verbose_sleep_duration); +} +EXPORT_SYMBOL_GPL(verbose_torout_sleep); + /* * Schedule a high-resolution-timer sleep in nanoseconds, with a 32-bit * nanosecond random fuzz. This function and its friends desynchronize From edf7b8417834c89d00ef88355ea507b0b0a630ae Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 2 Dec 2020 17:52:07 -0800 Subject: [PATCH 111/130] rcutorture: Make object_debug also double call_rcu() heap object This commit provides a test for call_rcu() printing the allocation address of a double-freed callback by double-freeing a callback allocated via kmalloc(). However, this commit does not depend on any other commit. Signed-off-by: Paul E. McKenney --- kernel/rcu/rcutorture.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 007595d4783f..76c838696366 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -2782,6 +2782,7 @@ static void rcu_test_debug_objects(void) #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD struct rcu_head rh1; struct rcu_head rh2; + struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); init_rcu_head_on_stack(&rh1); init_rcu_head_on_stack(&rh2); @@ -2794,6 +2795,10 @@ static void rcu_test_debug_objects(void) local_irq_disable(); /* Make it harder to start a new grace period. */ call_rcu(&rh2, rcu_torture_leak_cb); call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */ + if (rhp) { + call_rcu(rhp, rcu_torture_leak_cb); + call_rcu(rhp, rcu_torture_err_cb); /* Another duplicate callback. 
*/ + } local_irq_enable(); rcu_read_unlock(); preempt_enable(); From 0b962c8fe0e5c72a252b236814a6b6e9df799061 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sat, 19 Dec 2020 07:05:58 -0800 Subject: [PATCH 112/130] torture: Clean up after torture-test CPU hotplugging This commit puts all CPUs back online at the end of a torture test, and also unconditionally puts them online at the beginning of the test, rather than just in the case of built-in tests. This allows torture tests to behave in a predictable manner, whether built-in or based on modules. Signed-off-by: Paul E. McKenney --- kernel/torture.c | 36 ++++++++++++++++++++++-------------- 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/kernel/torture.c b/kernel/torture.c index 93eeeb2b3d88..507a20be6950 100644 --- a/kernel/torture.c +++ b/kernel/torture.c @@ -291,6 +291,26 @@ bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes, } EXPORT_SYMBOL_GPL(torture_online); +/* + * Get everything online at the beginning and ends of tests. + */ +static void torture_online_all(char *phase) +{ + int cpu; + int ret; + + for_each_possible_cpu(cpu) { + if (cpu_online(cpu)) + continue; + ret = add_cpu(cpu); + if (ret && verbose) { + pr_alert("%s" TORTURE_FLAG + "%s: %s online %d: errno %d\n", + __func__, phase, torture_type, cpu, ret); + } + } +} + /* * Execute random CPU-hotplug operations at the interval specified * by the onoff_interval. @@ -301,25 +321,12 @@ torture_onoff(void *arg) int cpu; int maxcpu = -1; DEFINE_TORTURE_RANDOM(rand); - int ret; VERBOSE_TOROUT_STRING("torture_onoff task started"); for_each_online_cpu(cpu) maxcpu = cpu; WARN_ON(maxcpu < 0); - if (!IS_MODULE(CONFIG_TORTURE_TEST)) { - for_each_possible_cpu(cpu) { - if (cpu_online(cpu)) - continue; - ret = add_cpu(cpu); - if (ret && verbose) { - pr_alert("%s" TORTURE_FLAG - "%s: Initial online %d: errno %d\n", - __func__, torture_type, cpu, ret); - } - } - } - + torture_online_all("Initial"); if (maxcpu == 0) { VERBOSE_TOROUT_STRING("Only one CPU, so CPU-hotplug testing is disabled"); goto stop; @@ -347,6 +354,7 @@ torture_onoff(void *arg) stop: torture_kthread_stopping("torture_onoff"); + torture_online_all("Final"); return 0; } From 1afb95fee0342b8d9e05b0433e8e44a6dfd7c4a3 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sat, 19 Dec 2020 07:34:35 -0800 Subject: [PATCH 113/130] torture: Maintain torture-specific set of CPUs-online books The TREE01 rcutorture scenario intentionally creates confusion as to the number of available CPUs by specifying the "maxcpus=8 nr_cpus=43" kernel boot parameters. This can disable rcutorture's load shedding, which currently uses num_online_cpus(), which would count the extra 35 CPUs. However, the rcutorture guest OS will be provisioned with only 8 CPUs, which means that rcutorture will present full load even when all but one of the original 8 CPUs are offline. This can result in spurious errors due to extreme overloading of that single remaining CPU. This commit therefore keeps a separate set of books on the number of usable online CPUs, so that torture_num_online_cpus() is used for load shedding instead of num_online_cpus(). Note that initial sizing must use num_online_cpus() because torture_num_online_cpus() will return NR_CPUS until shortly after torture_onoff_init() is invoked. Reported-by: Frederic Weisbecker [ paulmck: Apply feedback from kernel test robot. ] Signed-off-by: Paul E. 
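To show how a test kthread is expected to combine the two counts described above, here is a hedged sketch; the function and its sizing argument are hypothetical, while torture_num_online_cpus() and torture_must_stop() are the real interfaces:

/* Hypothetical reader: sized from num_online_cpus() at init time (when
 * torture_num_online_cpus() still returns NR_CPUS), but shedding load at
 * run time using the torture-specific count. */
static int example_reader(void *arg)
{
        int mynumonline = (long)arg;    /* Derived from num_online_cpus() at init. */

        while (!torture_must_stop()) {
                if (torture_num_online_cpus() < mynumonline) {
                        schedule_timeout_interruptible(HZ / 5); /* Shed load. */
                        continue;
                }
                /* ... perform one read-side pass ... */
                cond_resched();
        }
        return 0;
}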
McKenney --- include/linux/torture.h | 5 +++++ kernel/rcu/rcutorture.c | 4 ++-- kernel/torture.c | 16 ++++++++++++++++ 3 files changed, 23 insertions(+), 2 deletions(-) diff --git a/include/linux/torture.h b/include/linux/torture.h index d62d13c8c69a..0910c5803f35 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -48,6 +48,11 @@ do { \ void verbose_torout_sleep(void); /* Definitions for online/offline exerciser. */ +#ifdef CONFIG_HOTPLUG_CPU +int torture_num_online_cpus(void); +#else /* #ifdef CONFIG_HOTPLUG_CPU */ +static inline int torture_num_online_cpus(void) { return 1; } +#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */ typedef void torture_ofl_func(void); bool torture_offline(int cpu, long *n_onl_attempts, long *n_onl_successes, unsigned long *sum_offl, int *min_onl, int *max_onl); diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 76c838696366..a816df4e86e0 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -1338,7 +1338,7 @@ static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp, struct torture_random_state *trsp) { unsigned long loops; - int noc = num_online_cpus(); + int noc = torture_num_online_cpus(); int rdrchked; int rdrchker; struct rcu_torture_reader_check *rtrcp; // Me. @@ -1658,7 +1658,7 @@ rcu_torture_reader(void *arg) torture_hrtimeout_us(500, 1000, &rand); lastsleep = jiffies + 10; } - while (num_online_cpus() < mynumonline && !torture_must_stop()) + while (torture_num_online_cpus() < mynumonline && !torture_must_stop()) schedule_timeout_interruptible(HZ / 5); stutter_wait("rcu_torture_reader"); } while (!torture_must_stop()); diff --git a/kernel/torture.c b/kernel/torture.c index 507a20be6950..01e336f1e5b2 100644 --- a/kernel/torture.c +++ b/kernel/torture.c @@ -175,6 +175,19 @@ static unsigned long sum_online; static int min_online = -1; static int max_online; +static int torture_online_cpus = NR_CPUS; + +/* + * Some torture testing leverages confusion as to the number of online + * CPUs. This function returns the torture-testing view of this number, + * which allows torture tests to load-balance appropriately. + */ +int torture_num_online_cpus(void) +{ + return READ_ONCE(torture_online_cpus); +} +EXPORT_SYMBOL_GPL(torture_num_online_cpus); + /* * Attempt to take a CPU offline. Return false if the CPU is already * offline or if it is not subject to CPU-hotplug operations. The @@ -229,6 +242,8 @@ bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes, *min_offl = delta; if (*max_offl < delta) *max_offl = delta; + WRITE_ONCE(torture_online_cpus, torture_online_cpus - 1); + WARN_ON_ONCE(torture_online_cpus <= 0); } return true; @@ -285,6 +300,7 @@ bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes, *min_onl = delta; if (*max_onl < delta) *max_onl = delta; + WRITE_ONCE(torture_online_cpus, torture_online_cpus + 1); } return true; From d945f797e483979bdeded76266c366f35929afb8 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 25 Dec 2020 07:40:48 -0800 Subject: [PATCH 114/130] rcutorture: Add rcutree.use_softirq=0 to RUDE01 and TASKS01 RCU's rcutree.use_softirq=0 kernel boot parameter substitutes the per-CPU rcuc kthreads for softirq, which is used in real-time installations. However, none of the rcutorture scenarios test this parameter. This commit therefore adds rcutree.use_softirq=0 to the RUDE01 and TASKS01 rcutorture scenarios, both of which indirectly exercise RCU. Signed-off-by: Paul E. 
McKenney --- tools/testing/selftests/rcutorture/configs/rcu/RUDE01.boot | 1 + tools/testing/selftests/rcutorture/configs/rcu/TASKS01.boot | 1 + 2 files changed, 2 insertions(+) diff --git a/tools/testing/selftests/rcutorture/configs/rcu/RUDE01.boot b/tools/testing/selftests/rcutorture/configs/rcu/RUDE01.boot index 9363708c9075..932a0799eb08 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/RUDE01.boot +++ b/tools/testing/selftests/rcutorture/configs/rcu/RUDE01.boot @@ -1 +1,2 @@ rcutorture.torture_type=tasks-rude +rcutree.use_softirq=0 diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TASKS01.boot b/tools/testing/selftests/rcutorture/configs/rcu/TASKS01.boot index cd2a188eeb6d..22cdeced98ea 100644 --- a/tools/testing/selftests/rcutorture/configs/rcu/TASKS01.boot +++ b/tools/testing/selftests/rcutorture/configs/rcu/TASKS01.boot @@ -1 +1,2 @@ rcutorture.torture_type=tasks +rcutree.use_softirq=0 From c261145abd2461f921ac44ad70c28778dda710f4 Mon Sep 17 00:00:00 2001 From: Willy Tarreau Date: Thu, 21 Jan 2021 08:20:23 +0100 Subject: [PATCH 115/130] tools/nolibc: Add the definition for dup() This commit adds the dup() function, which was omitted when sys_dup() was defined. This is a port of nolibc's upstream commit 47cc42a79c92 to the Linux kernel. Fixes: 66b6f755ad45 ("rcutorture: Import a copy of nolibc") Tested-by: Valentin Schneider Tested-by: Mark Rutland [arm64] Signed-off-by: Willy Tarreau Signed-off-by: Paul E. McKenney --- tools/include/nolibc/nolibc.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h index e61d36cd4e50..3115c6467d10 100644 --- a/tools/include/nolibc/nolibc.h +++ b/tools/include/nolibc/nolibc.h @@ -1852,6 +1852,18 @@ int close(int fd) return ret; } +static __attribute__((unused)) +int dup(int fd) +{ + int ret = sys_dup(fd); + + if (ret < 0) { + SET_ERRNO(-ret); + ret = -1; + } + return ret; +} + static __attribute__((unused)) int dup2(int old, int new) { From 79f220e56dc85739aa5462fa8a1abd4a44f002e0 Mon Sep 17 00:00:00 2001 From: Willy Tarreau Date: Thu, 21 Jan 2021 08:20:24 +0100 Subject: [PATCH 116/130] tools/nolibc: Make dup2() rely on dup3() when available A recent boot failure on 5.4-rc3 on arm64 revealed that sys_dup2() is not available and that only sys_dup3() is implemented. This commit detects this and falls back to sys_dup3() when available. This is a port of nolibc's upstream commit fd5272ec2c66 to the Linux kernel. Tested-by: Valentin Schneider Tested-by: Mark Rutland [arm64] Signed-off-by: Willy Tarreau Signed-off-by: Willy Tarreau Signed-off-by: Paul E. 
McKenney --- tools/include/nolibc/nolibc.h | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h index 3115c6467d10..5fda4d844054 100644 --- a/tools/include/nolibc/nolibc.h +++ b/tools/include/nolibc/nolibc.h @@ -1502,10 +1502,22 @@ int sys_dup(int fd) return my_syscall1(__NR_dup, fd); } +#ifdef __NR_dup3 +static __attribute__((unused)) +int sys_dup3(int old, int new, int flags) +{ + return my_syscall3(__NR_dup3, old, new, flags); +} +#endif + static __attribute__((unused)) int sys_dup2(int old, int new) { +#ifdef __NR_dup3 + return my_syscall3(__NR_dup3, old, new, 0); +#else return my_syscall2(__NR_dup2, old, new); +#endif } static __attribute__((unused)) @@ -1876,6 +1888,20 @@ int dup2(int old, int new) return ret; } +#ifdef __NR_dup3 +static __attribute__((unused)) +int dup3(int old, int new, int flags) +{ + int ret = sys_dup3(old, new, flags); + + if (ret < 0) { + SET_ERRNO(-ret); + ret = -1; + } + return ret; +} +#endif + static __attribute__((unused)) int execve(const char *filename, char *const argv[], char *const envp[]) { From c0c7c103756fee25aadfd5c36f7b86e318f9abb4 Mon Sep 17 00:00:00 2001 From: Willy Tarreau Date: Thu, 21 Jan 2021 08:20:25 +0100 Subject: [PATCH 117/130] tools/nolibc: Make getpgrp() fall back to getpgid(0) The getpgrp() syscall is not implemented on arm64, so this commit instead uses getpgid(0) when getpgrp() is not available. This is a port of nolibc's upstream commit 2379f25073f9 to the Linux kernel. Fixes: 66b6f755ad45 ("rcutorture: Import a copy of nolibc") Tested-by: Valentin Schneider Tested-by: Mark Rutland [arm64] Signed-off-by: Willy Tarreau Signed-off-by: Paul E. McKenney --- tools/include/nolibc/nolibc.h | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h index 5fda4d844054..9209da89044a 100644 --- a/tools/include/nolibc/nolibc.h +++ b/tools/include/nolibc/nolibc.h @@ -1544,10 +1544,16 @@ int sys_getdents64(int fd, struct linux_dirent64 *dirp, int count) return my_syscall3(__NR_getdents64, fd, dirp, count); } +static __attribute__((unused)) +pid_t sys_getpgid(pid_t pid) +{ + return my_syscall1(__NR_getpgid, pid); +} + static __attribute__((unused)) pid_t sys_getpgrp(void) { - return my_syscall0(__NR_getpgrp); + return sys_getpgid(0); } static __attribute__((unused)) @@ -1950,6 +1956,18 @@ int getdents64(int fd, struct linux_dirent64 *dirp, int count) return ret; } +static __attribute__((unused)) +pid_t getpgid(pid_t pid) +{ + pid_t ret = sys_getpgid(pid); + + if (ret < 0) { + SET_ERRNO(-ret); + ret = -1; + } + return ret; +} + static __attribute__((unused)) pid_t getpgrp(void) { From be60ca41fbaa93bc8f92b24e34d8cc62af41300d Mon Sep 17 00:00:00 2001 From: Willy Tarreau Date: Thu, 21 Jan 2021 08:20:26 +0100 Subject: [PATCH 118/130] tools/nolibc: Implement fork() based on clone() Some archs such as arm64 do not have fork() and have to use clone() instead. This commit therefore makes fork() use clone() when available. This requires including signal.h to get the definition of SIGCHLD. This is a port of nolibc's upstream commit d2dc42fd6149 to the Linux kernel. Fixes: 66b6f755ad45 ("rcutorture: Import a copy of nolibc") Tested-by: Valentin Schneider Tested-by: Mark Rutland [arm64] Signed-off-by: Willy Tarreau Signed-off-by: Paul E. 
McKenney --- tools/include/nolibc/nolibc.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h index 9209da89044a..fdd5524e0e54 100644 --- a/tools/include/nolibc/nolibc.h +++ b/tools/include/nolibc/nolibc.h @@ -271,6 +271,8 @@ struct stat { #define WEXITSTATUS(status) (((status) & 0xff00) >> 8) #define WIFEXITED(status) (((status) & 0x7f) == 0) +/* for SIGCHLD */ +#include /* Below comes the architecture-specific code. For each architecture, we have * the syscall declarations and the _start code definition. This is the only @@ -1529,7 +1531,15 @@ int sys_execve(const char *filename, char *const argv[], char *const envp[]) static __attribute__((unused)) pid_t sys_fork(void) { +#ifdef __NR_clone + /* note: some archs only have clone() and not fork(). Different archs + * have a different API, but most archs have the flags on first arg and + * will not use the rest with no other flag. + */ + return my_syscall5(__NR_clone, SIGCHLD, 0, 0, 0, 0); +#else return my_syscall0(__NR_fork); +#endif } static __attribute__((unused)) From 5b1c827ca3b349801e2faff4185118cfa74f94c6 Mon Sep 17 00:00:00 2001 From: Willy Tarreau Date: Thu, 21 Jan 2021 08:20:27 +0100 Subject: [PATCH 119/130] tools/nolibc: Implement poll() based on ppoll() Some architectures like arm64 do not implement poll() and have to use ppoll() instead. This commit therefore makes poll() use ppoll() when available. This is a port of nolibc's upstream commit 800f75c13ede to the Linux kernel. Fixes: 66b6f755ad45 ("rcutorture: Import a copy of nolibc") Tested-by: Valentin Schneider Tested-by: Mark Rutland [arm64] Signed-off-by: Willy Tarreau Signed-off-by: Paul E. McKenney --- tools/include/nolibc/nolibc.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h index fdd5524e0e54..833693faf53c 100644 --- a/tools/include/nolibc/nolibc.h +++ b/tools/include/nolibc/nolibc.h @@ -1652,7 +1652,17 @@ int sys_pivot_root(const char *new, const char *old) static __attribute__((unused)) int sys_poll(struct pollfd *fds, int nfds, int timeout) { +#if defined(__NR_ppoll) + struct timespec t; + + if (timeout >= 0) { + t.tv_sec = timeout / 1000; + t.tv_nsec = (timeout % 1000) * 1000000; + } + return my_syscall4(__NR_ppoll, fds, nfds, (timeout >= 0) ? &t : NULL, NULL); +#else return my_syscall3(__NR_poll, fds, nfds, timeout); +#endif } static __attribute__((unused)) From 70ca7aea50a27f03aa7e4cc6ee68940d13cbcd17 Mon Sep 17 00:00:00 2001 From: Willy Tarreau Date: Thu, 21 Jan 2021 08:20:28 +0100 Subject: [PATCH 120/130] tools/nolibc: Get timeval, timespec and timezone from linux/time.h The definitions of timeval(), timespec() and timezone() conflict with linux/time.h when building, so this commit takes them directly from linux/time.h. This is a port of nolibc's upstream commit dc45f5426b0c to the Linux kernel. Fixes: 66b6f755ad45 ("rcutorture: Import a copy of nolibc") Tested-by: Valentin Schneider Tested-by: Mark Rutland [arm64] Signed-off-by: Willy Tarreau Signed-off-by: Paul E. 
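The ppoll()-based fallback introduced just above has to translate poll()'s millisecond timeout into a struct timespec. As a hedged illustration (the helper name is hypothetical), the conversion amounts to the following:

/* Hypothetical helper showing the millisecond-to-timespec arithmetic used
 * by the ppoll() fallback: whole seconds, then the remainder in
 * nanoseconds. A negative timeout means "wait forever" and is passed to
 * ppoll() as a NULL pointer instead of being converted. */
static struct timespec example_ms_to_timespec(int timeout_ms)
{
        struct timespec t;

        t.tv_sec  = timeout_ms / 1000;
        t.tv_nsec = (timeout_ms % 1000) * 1000000;
        return t;
}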
McKenney --- tools/include/nolibc/nolibc.h | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h index 833693faf53c..611d9d15899d 100644 --- a/tools/include/nolibc/nolibc.h +++ b/tools/include/nolibc/nolibc.h @@ -94,6 +94,7 @@ #include #include #include +#include #define NOLIBC @@ -152,24 +153,6 @@ struct pollfd { short int revents; }; -/* for select() */ -struct timeval { - long tv_sec; - long tv_usec; -}; - -/* for pselect() */ -struct timespec { - long tv_sec; - long tv_nsec; -}; - -/* for gettimeofday() */ -struct timezone { - int tz_minuteswest; - int tz_dsttime; -}; - /* for getdents64() */ struct linux_dirent64 { uint64_t d_ino; From f65d7117785cb8ab04f1af55909807c7eb9ed30b Mon Sep 17 00:00:00 2001 From: Willy Tarreau Date: Thu, 21 Jan 2021 08:20:29 +0100 Subject: [PATCH 121/130] tools/nolibc: Remove incorrect definitions of __ARCH_WANT_* The __ARCH_WANT_* definitions were added in order to support aarch64 when it was missing some syscall definitions (including __NR_dup2, __NR_fork, and __NR_getpgrp), but these __ARCH_WANT_* definitions were actually wrong because these syscalls do not exist on this platform. Defining these resulted in exposing invalid definitions, resulting in failures on aarch64. The missing syscalls were since implemented based on the newer ones (__NR_dup3, __NR_clone, __NR_getpgid) so these incorrect __ARCH_WANT_* definitions are no longer needed. Thanks to Mark Rutland for spotting this incorrect analysis and explaining why it was wrong. This is a port of nolibc's upstream commit 00b1b0d9b2a4 to the Linux kernel. Reported-by: Mark Rutland Link: https://lore.kernel.org/lkml/20210119153147.GA5083@paulmck-ThinkPad-P72 Tested-by: Valentin Schneider Tested-by: Mark Rutland [arm64] Signed-off-by: Willy Tarreau Signed-off-by: Paul E. McKenney --- tools/include/nolibc/nolibc.h | 8 -------- 1 file changed, 8 deletions(-) diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h index 611d9d15899d..475d956ed1d6 100644 --- a/tools/include/nolibc/nolibc.h +++ b/tools/include/nolibc/nolibc.h @@ -81,14 +81,6 @@ * */ -/* Some archs (at least aarch64) don't expose the regular syscalls anymore by - * default, either because they have an "_at" replacement, or because there are - * more modern alternatives. For now we'd rather still use them. - */ -#define __ARCH_WANT_SYSCALL_NO_AT -#define __ARCH_WANT_SYSCALL_NO_FLAGS -#define __ARCH_WANT_SYSCALL_DEPRECATED - #include #include #include From 35635d7fa689492ca9edb1d949f1805f074ecf1a Mon Sep 17 00:00:00 2001 From: Willy Tarreau Date: Thu, 21 Jan 2021 08:20:30 +0100 Subject: [PATCH 122/130] tools/nolibc: Emit detailed error for missing alternate syscall number definitions Some syscalls can be implemented from different __NR_* variants. For example, sys_dup2() can be implemented based on __NR_dup3 or __NR_dup2. In this case it is useful to mention both alternatives in error messages when neither are detected. This information will help the user search for the right one (e.g __NR_dup3) instead of just the fallback (__NR_dup2) which might not exist on the platform. This is a port of nolibc's upstream commit a21080d2ba41 to the Linux kernel. Suggested-by: Mark Rutland Link: https://lore.kernel.org/lkml/20210120145447.GC77728@C02TD0UTHF1T.local/ Tested-by: Valentin Schneider Tested-by: Mark Rutland [arm64] Signed-off-by: Willy Tarreau Signed-off-by: Paul E. 
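The pattern being generalized here can be illustrated with a hedged, hypothetical wrapper that is not part of the patch: prefer the modern syscall number, fall back to the legacy one, and turn a silent miscompile into an explicit build error naming both alternatives when neither is available:

/* Hypothetical example of the fallback-plus-#error pattern. */
static __attribute__((unused))
int sys_pipe_example(int pipefd[2])
{
#ifdef __NR_pipe2
        return my_syscall2(__NR_pipe2, pipefd, 0);
#elif defined(__NR_pipe)
        return my_syscall1(__NR_pipe, pipefd);
#else
#error Neither __NR_pipe2 nor __NR_pipe defined, cannot implement sys_pipe_example()
#endif
}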
McKenney --- tools/include/nolibc/nolibc.h | 52 ++++++++++++++++++++++++++--------- 1 file changed, 39 insertions(+), 13 deletions(-) diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h index 475d956ed1d6..618acad6c932 100644 --- a/tools/include/nolibc/nolibc.h +++ b/tools/include/nolibc/nolibc.h @@ -1446,8 +1446,10 @@ int sys_chmod(const char *path, mode_t mode) { #ifdef __NR_fchmodat return my_syscall4(__NR_fchmodat, AT_FDCWD, path, mode, 0); -#else +#elif defined(__NR_chmod) return my_syscall2(__NR_chmod, path, mode); +#else +#error Neither __NR_fchmodat nor __NR_chmod defined, cannot implement sys_chmod() #endif } @@ -1456,8 +1458,10 @@ int sys_chown(const char *path, uid_t owner, gid_t group) { #ifdef __NR_fchownat return my_syscall5(__NR_fchownat, AT_FDCWD, path, owner, group, 0); -#else +#elif defined(__NR_chown) return my_syscall3(__NR_chown, path, owner, group); +#else +#error Neither __NR_fchownat nor __NR_chown defined, cannot implement sys_chown() #endif } @@ -1492,8 +1496,10 @@ int sys_dup2(int old, int new) { #ifdef __NR_dup3 return my_syscall3(__NR_dup3, old, new, 0); -#else +#elif defined(__NR_dup2) return my_syscall2(__NR_dup2, old, new); +#else +#error Neither __NR_dup3 nor __NR_dup2 defined, cannot implement sys_dup2() #endif } @@ -1512,8 +1518,10 @@ pid_t sys_fork(void) * will not use the rest with no other flag. */ return my_syscall5(__NR_clone, SIGCHLD, 0, 0, 0, 0); -#else +#elif defined(__NR_fork) return my_syscall0(__NR_fork); +#else +#error Neither __NR_clone nor __NR_fork defined, cannot implement sys_fork() #endif } @@ -1570,8 +1578,10 @@ int sys_link(const char *old, const char *new) { #ifdef __NR_linkat return my_syscall5(__NR_linkat, AT_FDCWD, old, AT_FDCWD, new, 0); -#else +#elif defined(__NR_link) return my_syscall2(__NR_link, old, new); +#else +#error Neither __NR_linkat nor __NR_link defined, cannot implement sys_link() #endif } @@ -1586,8 +1596,10 @@ int sys_mkdir(const char *path, mode_t mode) { #ifdef __NR_mkdirat return my_syscall3(__NR_mkdirat, AT_FDCWD, path, mode); -#else +#elif defined(__NR_mkdir) return my_syscall2(__NR_mkdir, path, mode); +#else +#error Neither __NR_mkdirat nor __NR_mkdir defined, cannot implement sys_mkdir() #endif } @@ -1596,8 +1608,10 @@ long sys_mknod(const char *path, mode_t mode, dev_t dev) { #ifdef __NR_mknodat return my_syscall4(__NR_mknodat, AT_FDCWD, path, mode, dev); -#else +#elif defined(__NR_mknod) return my_syscall3(__NR_mknod, path, mode, dev); +#else +#error Neither __NR_mknodat nor __NR_mknod defined, cannot implement sys_mknod() #endif } @@ -1613,8 +1627,10 @@ int sys_open(const char *path, int flags, mode_t mode) { #ifdef __NR_openat return my_syscall4(__NR_openat, AT_FDCWD, path, flags, mode); -#else +#elif defined(__NR_open) return my_syscall3(__NR_open, path, flags, mode); +#else +#error Neither __NR_openat nor __NR_open defined, cannot implement sys_open() #endif } @@ -1635,8 +1651,10 @@ int sys_poll(struct pollfd *fds, int nfds, int timeout) t.tv_nsec = (timeout % 1000) * 1000000; } return my_syscall4(__NR_ppoll, fds, nfds, (timeout >= 0) ? &t : NULL, NULL); -#else +#elif defined(__NR_poll) return my_syscall3(__NR_poll, fds, nfds, timeout); +#else +#error Neither __NR_ppoll nor __NR_poll defined, cannot implement sys_poll() #endif } @@ -1676,11 +1694,13 @@ int sys_select(int nfds, fd_set *rfds, fd_set *wfds, fd_set *efds, struct timeva t.tv_nsec = timeout->tv_usec * 1000; } return my_syscall6(__NR_pselect6, nfds, rfds, wfds, efds, timeout ? 
&t : NULL, NULL); -#else +#elif defined(__NR__newselect) || defined(__NR_select) #ifndef __NR__newselect #define __NR__newselect __NR_select #endif return my_syscall5(__NR__newselect, nfds, rfds, wfds, efds, timeout); +#else +#error None of __NR_select, __NR_pselect6, nor __NR__newselect defined, cannot implement sys_select() #endif } @@ -1705,8 +1725,10 @@ int sys_stat(const char *path, struct stat *buf) #ifdef __NR_newfstatat /* only solution for arm64 */ ret = my_syscall4(__NR_newfstatat, AT_FDCWD, path, &stat, 0); -#else +#elif defined(__NR_stat) ret = my_syscall2(__NR_stat, path, &stat); +#else +#error Neither __NR_newfstatat nor __NR_stat defined, cannot implement sys_stat() #endif buf->st_dev = stat.st_dev; buf->st_ino = stat.st_ino; @@ -1730,8 +1752,10 @@ int sys_symlink(const char *old, const char *new) { #ifdef __NR_symlinkat return my_syscall3(__NR_symlinkat, old, AT_FDCWD, new); -#else +#elif defined(__NR_symlink) return my_syscall2(__NR_symlink, old, new); +#else +#error Neither __NR_symlinkat nor __NR_symlink defined, cannot implement sys_symlink() #endif } @@ -1752,8 +1776,10 @@ int sys_unlink(const char *path) { #ifdef __NR_unlinkat return my_syscall3(__NR_unlinkat, AT_FDCWD, path, 0); -#else +#elif defined(__NR_unlink) return my_syscall1(__NR_unlink, path); +#else +#error Neither __NR_unlinkat nor __NR_unlink defined, cannot implement sys_unlink() #endif } From 3c6ce7a5363723a05bfe3ee03a8d4a9b66841ae4 Mon Sep 17 00:00:00 2001 From: Willy Tarreau Date: Thu, 21 Jan 2021 08:20:31 +0100 Subject: [PATCH 123/130] tools/nolibc: Fix position of -lgcc in the documented example The documentation header in the nolibc.h file provides an example command line, but it places the -lgcc argument before the source files, which can fail with libgcc.a (e.g. on ARM when uidiv is needed). This commit therefore moves the -lgcc to the end of the command line, hopefully before this example leaks into makefiles. This is a port of nolibc's upstream commit b5e282089223 to the Linux kernel. Fixes: 66b6f755ad45 ("rcutorture: Import a copy of nolibc") Tested-by: Valentin Schneider Tested-by: Mark Rutland [arm64] Signed-off-by: Willy Tarreau Signed-off-by: Paul E. McKenney --- tools/include/nolibc/nolibc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h index 618acad6c932..8b7a9830dd22 100644 --- a/tools/include/nolibc/nolibc.h +++ b/tools/include/nolibc/nolibc.h @@ -71,7 +71,7 @@ * * A simple static executable may be built this way : * $ gcc -fno-asynchronous-unwind-tables -fno-ident -s -Os -nostdlib \ - * -static -include nolibc.h -lgcc -o hello hello.c + * -static -include nolibc.h -o hello hello.c -lgcc * * A very useful calling convention table may be found here : * http://man7.org/linux/man-pages/man2/syscall.2.html From 26cec81415b1b2a2e8e36ef0b24cf5f26467aa61 Mon Sep 17 00:00:00 2001 From: Willy Tarreau Date: Thu, 21 Jan 2021 08:48:08 +0100 Subject: [PATCH 124/130] tools/rcutorture: Fix position of -lgcc in mkinitrd.sh The -lgcc command-line argument is placed poorly in the build options, which can result in build failures, for exapmle, on ARM when uidiv() is required. This commit therefore places the -lgcc argument after the source files. Fixes: b94ec36896da ("rcutorture: Make use of nolibc when available") Tested-by: Valentin Schneider Tested-by: Mark Rutland [arm64] Signed-off-by: Willy Tarreau Signed-off-by: Paul E. 
McKenney --- tools/testing/selftests/rcutorture/bin/mkinitrd.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/rcutorture/bin/mkinitrd.sh b/tools/testing/selftests/rcutorture/bin/mkinitrd.sh index 38e424d2392c..70d62fd0d31d 100755 --- a/tools/testing/selftests/rcutorture/bin/mkinitrd.sh +++ b/tools/testing/selftests/rcutorture/bin/mkinitrd.sh @@ -70,7 +70,7 @@ if echo -e "#if __x86_64__||__i386__||__i486__||__i586__||__i686__" \ # architecture supported by nolibc ${CROSS_COMPILE}gcc -fno-asynchronous-unwind-tables -fno-ident \ -nostdlib -include ../../../../include/nolibc/nolibc.h \ - -lgcc -s -static -Os -o init init.c + -s -static -Os -o init init.c -lgcc else ${CROSS_COMPILE}gcc -s -static -Os -o init init.c fi From 8e7f37f2aaa56b723a24f6872817cf9c6410b613 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 7 Dec 2020 17:41:02 -0800 Subject: [PATCH 125/130] mm: Add mem_dump_obj() to print source of memory block There are kernel facilities such as per-CPU reference counts that give error messages in generic handlers or callbacks, whose messages are unenlightening. In the case of per-CPU reference-count underflow, this is not a problem when creating a new use of this facility because in that case the bug is almost certainly in the code implementing that new use. However, trouble arises when deploying across many systems, which might exercise corner cases that were not seen during development and testing. Here, it would be really nice to get some kind of hint as to which of several uses the underflow was caused by. This commit therefore exposes a mem_dump_obj() function that takes a pointer to memory (which must still be allocated if it has been dynamically allocated) and prints available information on where that memory came from. This pointer can reference the middle of the block as well as the beginning of the block, as needed by things like RCU callback functions and timer handlers that might not know where the beginning of the memory block is. These functions and handlers can use mem_dump_obj() to print out better hints as to where the problem might lie. The information printed can depend on kernel configuration. For example, the allocation return address can be printed only for slab and slub, and even then only when the necessary debug has been enabled. For slab, build with CONFIG_DEBUG_SLAB=y, and either use sizes with ample space to the next power of two or use the SLAB_STORE_USER when creating the kmem_cache structure. For slub, build with CONFIG_SLUB_DEBUG=y and boot with slub_debug=U, or pass SLAB_STORE_USER to kmem_cache_create() if more focused use is desired. Also for slub, use CONFIG_STACKTRACE to enable printing of the allocation-time stack trace. Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Cc: Andrew Morton Cc: Reported-by: Andrii Nakryiko [ paulmck: Convert to printing and change names per Joonsoo Kim. ] [ paulmck: Move slab definition per Stephen Rothwell and kbuild test robot. ] [ paulmck: Handle CONFIG_MMU=n case where vmalloc() is kmalloc(). ] [ paulmck: Apply Vlastimil Babka feedback on slab.c kmem_provenance(). ] [ paulmck: Extract more info from !SLUB_DEBUG per Joonsoo Kim. ] [ paulmck: Explicitly check for small pointers per Naresh Kamboju. ] Acked-by: Joonsoo Kim Acked-by: Vlastimil Babka Tested-by: Naresh Kamboju Signed-off-by: Paul E. 
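As a hedged example of the opt-in debugging described above (the cache name and sizes are hypothetical), a slab cache can request allocation-provenance tracking at creation time so that kmem_dump_obj() later has a return address to print:

/* Hypothetical cache whose objects will carry allocator provenance. */
static struct kmem_cache *example_cache;

static int __init example_cache_init(void)
{
        example_cache = kmem_cache_create("example_cache", 128, 0,
                                          SLAB_STORE_USER, NULL);
        return example_cache ? 0 : -ENOMEM;
}

For slub, booting with slub_debug=U has the same effect system-wide, and CONFIG_STACKTRACE additionally enables printing of the allocation-time stack trace.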
McKenney --- include/linux/mm.h | 2 ++ include/linux/slab.h | 2 ++ mm/slab.c | 20 ++++++++++++ mm/slab.h | 12 +++++++ mm/slab_common.c | 75 ++++++++++++++++++++++++++++++++++++++++++++ mm/slob.c | 6 ++++ mm/slub.c | 40 +++++++++++++++++++++++ mm/util.c | 24 ++++++++++++++ 8 files changed, 181 insertions(+) diff --git a/include/linux/mm.h b/include/linux/mm.h index 5299b90a6c40..af7d050900e7 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3169,5 +3169,7 @@ unsigned long wp_shared_mapping_range(struct address_space *mapping, extern int sysctl_nr_trim_pages; +void mem_dump_obj(void *object); + #endif /* __KERNEL__ */ #endif /* _LINUX_MM_H */ diff --git a/include/linux/slab.h b/include/linux/slab.h index be4ba5867ac5..7ae604076767 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -186,6 +186,8 @@ void kfree(const void *); void kfree_sensitive(const void *); size_t __ksize(const void *); size_t ksize(const void *); +bool kmem_valid_obj(void *object); +void kmem_dump_obj(void *object); #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR void __check_heap_object(const void *ptr, unsigned long n, struct page *page, diff --git a/mm/slab.c b/mm/slab.c index d7c8da9319c7..dcc55e78f353 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3635,6 +3635,26 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t flags, EXPORT_SYMBOL(__kmalloc_node_track_caller); #endif /* CONFIG_NUMA */ +void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page) +{ + struct kmem_cache *cachep; + unsigned int objnr; + void *objp; + + kpp->kp_ptr = object; + kpp->kp_page = page; + cachep = page->slab_cache; + kpp->kp_slab_cache = cachep; + objp = object - obj_offset(cachep); + kpp->kp_data_offset = obj_offset(cachep); + page = virt_to_head_page(objp); + objnr = obj_to_index(cachep, page, objp); + objp = index_to_obj(cachep, page, objnr); + kpp->kp_objp = objp; + if (DEBUG && cachep->flags & SLAB_STORE_USER) + kpp->kp_ret = *dbg_userword(cachep, objp); +} + /** * __do_kmalloc - allocate memory * @size: how many bytes of memory are required. diff --git a/mm/slab.h b/mm/slab.h index 1a756a359fa8..ecad9b57bc44 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -615,4 +615,16 @@ static inline bool slab_want_init_on_free(struct kmem_cache *c) return false; } +#define KS_ADDRS_COUNT 16 +struct kmem_obj_info { + void *kp_ptr; + struct page *kp_page; + void *kp_objp; + unsigned long kp_data_offset; + struct kmem_cache *kp_slab_cache; + void *kp_ret; + void *kp_stack[KS_ADDRS_COUNT]; +}; +void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page); + #endif /* MM_SLAB_H */ diff --git a/mm/slab_common.c b/mm/slab_common.c index e981c80d216c..adbace4256ef 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -537,6 +537,81 @@ bool slab_is_available(void) return slab_state >= UP; } +/** + * kmem_valid_obj - does the pointer reference a valid slab object? + * @object: pointer to query. + * + * Return: %true if the pointer is to a not-yet-freed object from + * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer + * is to an already-freed object, and %false otherwise. + */ +bool kmem_valid_obj(void *object) +{ + struct page *page; + + /* Some arches consider ZERO_SIZE_PTR to be a valid address. */ + if (object < (void *)PAGE_SIZE || !virt_addr_valid(object)) + return false; + page = virt_to_head_page(object); + return PageSlab(page); +} + +/** + * kmem_dump_obj - Print available slab provenance information + * @object: slab object for which to find provenance information. 
+ * + * This function uses pr_cont(), so that the caller is expected to have + * printed out whatever preamble is appropriate. The provenance information + * depends on the type of object and on how much debugging is enabled. + * For a slab-cache object, the fact that it is a slab object is printed, + * and, if available, the slab name, return address, and stack trace from + * the allocation of that object. + * + * This function will splat if passed a pointer to a non-slab object. + * If you are not sure what type of object you have, you should instead + * use mem_dump_obj(). + */ +void kmem_dump_obj(void *object) +{ + char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc"; + int i; + struct page *page; + unsigned long ptroffset; + struct kmem_obj_info kp = { }; + + if (WARN_ON_ONCE(!virt_addr_valid(object))) + return; + page = virt_to_head_page(object); + if (WARN_ON_ONCE(!PageSlab(page))) { + pr_cont(" non-slab memory.\n"); + return; + } + kmem_obj_info(&kp, object, page); + if (kp.kp_slab_cache) + pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name); + else + pr_cont(" slab%s", cp); + if (kp.kp_objp) + pr_cont(" start %px", kp.kp_objp); + if (kp.kp_data_offset) + pr_cont(" data offset %lu", kp.kp_data_offset); + if (kp.kp_objp) { + ptroffset = ((char *)object - (char *)kp.kp_objp) - kp.kp_data_offset; + pr_cont(" pointer offset %lu", ptroffset); + } + if (kp.kp_slab_cache && kp.kp_slab_cache->usersize) + pr_cont(" size %u", kp.kp_slab_cache->usersize); + if (kp.kp_ret) + pr_cont(" allocated at %pS\n", kp.kp_ret); + else + pr_cont("\n"); + for (i = 0; i < ARRAY_SIZE(kp.kp_stack); i++) { + if (!kp.kp_stack[i]) + break; + pr_info(" %pS\n", kp.kp_stack[i]); + } +} + #ifndef CONFIG_SLOB /* Create a cache during boot when no slab services are available yet */ void __init create_boot_cache(struct kmem_cache *s, const char *name, diff --git a/mm/slob.c b/mm/slob.c index 8d4bfa46247f..ef87ada8705d 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -461,6 +461,12 @@ out: spin_unlock_irqrestore(&slob_lock, flags); } +void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page) +{ + kpp->kp_ptr = object; + kpp->kp_page = page; +} + /* * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend. 
*/ diff --git a/mm/slub.c b/mm/slub.c index 0c8b43a5b3b0..3c1a84316fd7 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3919,6 +3919,46 @@ int __kmem_cache_shutdown(struct kmem_cache *s) return 0; } +void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page) +{ + void *base; + int __maybe_unused i; + unsigned int objnr; + void *objp; + void *objp0; + struct kmem_cache *s = page->slab_cache; + struct track __maybe_unused *trackp; + + kpp->kp_ptr = object; + kpp->kp_page = page; + kpp->kp_slab_cache = s; + base = page_address(page); + objp0 = kasan_reset_tag(object); +#ifdef CONFIG_SLUB_DEBUG + objp = restore_red_left(s, objp0); +#else + objp = objp0; +#endif + objnr = obj_to_index(s, page, objp); + kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp); + objp = base + s->size * objnr; + kpp->kp_objp = objp; + if (WARN_ON_ONCE(objp < base || objp >= base + page->objects * s->size || (objp - base) % s->size) || + !(s->flags & SLAB_STORE_USER)) + return; +#ifdef CONFIG_SLUB_DEBUG + trackp = get_track(s, objp, TRACK_ALLOC); + kpp->kp_ret = (void *)trackp->addr; +#ifdef CONFIG_STACKTRACE + for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) { + kpp->kp_stack[i] = (void *)trackp->addrs[i]; + if (!kpp->kp_stack[i]) + break; + } +#endif +#endif +} + /******************************************************************** * Kmalloc subsystem *******************************************************************/ diff --git a/mm/util.c b/mm/util.c index 8c9b7d1e7c49..da46f9d6c382 100644 --- a/mm/util.c +++ b/mm/util.c @@ -982,3 +982,27 @@ int __weak memcmp_pages(struct page *page1, struct page *page2) kunmap_atomic(addr1); return ret; } + +/** + * mem_dump_obj - Print available provenance information + * @object: object for which to find provenance information. + * + * This function uses pr_cont(), so that the caller is expected to have + * printed out whatever preamble is appropriate. The provenance information + * depends on the type of object and on how much debugging is enabled. + * For example, for a slab-cache object, the slab name is printed, and, + * if available, the return address and stack trace from the allocation + * of that object. + */ +void mem_dump_obj(void *object) +{ + if (!virt_addr_valid(object)) { + pr_cont(" non-paged (local) memory.\n"); + return; + } + if (kmem_valid_obj(object)) { + kmem_dump_obj(object); + return; + } + pr_cont(" non-slab memory.\n"); +} From b70fa3b12fc8d2b870d1ac7fd44da89271eb8705 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 8 Dec 2020 15:26:22 -0800 Subject: [PATCH 126/130] mm: Make mem_dump_obj() handle NULL and zero-sized pointers This commit makes mem_dump_obj() call out NULL and zero-sized pointers specially instead of classifying them as non-paged memory. Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Cc: Andrew Morton Cc: Reported-by: Andrii Nakryiko Acked-by: Vlastimil Babka Tested-by: Naresh Kamboju Signed-off-by: Paul E. 
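Because kmem_dump_obj() and mem_dump_obj() print with pr_cont(), the caller supplies the preamble. A hedged usage sketch, with a hypothetical surrounding handler, looks like this:

/* Hypothetical diagnostic handler: emit a preamble without a trailing
 * newline, then let mem_dump_obj() append whatever provenance it finds. */
static void example_report_bad_pointer(void *ptr)
{
        pr_err("example: suspicious pointer %px:", ptr);
        mem_dump_obj(ptr);
}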
McKenney --- mm/util.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/mm/util.c b/mm/util.c index da46f9d6c382..92f23d2c1277 100644 --- a/mm/util.c +++ b/mm/util.c @@ -997,7 +997,12 @@ int __weak memcmp_pages(struct page *page1, struct page *page2) void mem_dump_obj(void *object) { if (!virt_addr_valid(object)) { - pr_cont(" non-paged (local) memory.\n"); + if (object == NULL) + pr_cont(" NULL pointer.\n"); + else if (object == ZERO_SIZE_PTR) + pr_cont(" zero-size pointer.\n"); + else + pr_cont(" non-paged (local) memory.\n"); return; } if (kmem_valid_obj(object)) { From 98f180837a896ecedf8f7e12af22b57f271d43c9 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 8 Dec 2020 16:13:57 -0800 Subject: [PATCH 127/130] mm: Make mem_dump_obj() handle vmalloc() memory This commit adds vmalloc() support to mem_dump_obj(). Note that the vmalloc_dump_obj() function combines the checking and dumping, in contrast with the split between kmem_valid_obj() and kmem_dump_obj(). The reason for the difference is that the checking in the vmalloc() case involves acquiring a global lock, and redundant acquisitions of global locks should be avoided, even on not-so-fast paths. Note that this change causes on-stack variables to be reported as vmalloc() storage from kernel_clone() or similar, depending on the degree of inlining that your compiler does. This is likely more helpful than the earlier "non-paged (local) memory". Cc: Andrew Morton Cc: Joonsoo Kim Cc: Reported-by: Andrii Nakryiko Acked-by: Vlastimil Babka Tested-by: Naresh Kamboju Signed-off-by: Paul E. McKenney --- include/linux/vmalloc.h | 6 ++++++ mm/util.c | 14 ++++++++------ mm/vmalloc.c | 12 ++++++++++++ 3 files changed, 26 insertions(+), 6 deletions(-) diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 80c0181c411d..c18f4751a704 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -246,4 +246,10 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) int register_vmap_purge_notifier(struct notifier_block *nb); int unregister_vmap_purge_notifier(struct notifier_block *nb); +#ifdef CONFIG_MMU +bool vmalloc_dump_obj(void *object); +#else +static inline bool vmalloc_dump_obj(void *object) { return false; } +#endif + #endif /* _LINUX_VMALLOC_H */ diff --git a/mm/util.c b/mm/util.c index 92f23d2c1277..54870226cea6 100644 --- a/mm/util.c +++ b/mm/util.c @@ -996,18 +996,20 @@ int __weak memcmp_pages(struct page *page1, struct page *page2) */ void mem_dump_obj(void *object) { + if (kmem_valid_obj(object)) { + kmem_dump_obj(object); + return; + } + if (vmalloc_dump_obj(object)) + return; if (!virt_addr_valid(object)) { if (object == NULL) pr_cont(" NULL pointer.\n"); else if (object == ZERO_SIZE_PTR) pr_cont(" zero-size pointer.\n"); else - pr_cont(" non-paged (local) memory.\n"); + pr_cont(" non-paged memory.\n"); return; } - if (kmem_valid_obj(object)) { - kmem_dump_obj(object); - return; - } - pr_cont(" non-slab memory.\n"); + pr_cont(" non-slab/vmalloc memory.\n"); } diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 4d88fe5a277a..c274ea4f14ea 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -3448,6 +3448,18 @@ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) } #endif /* CONFIG_SMP */ +bool vmalloc_dump_obj(void *object) +{ + struct vm_struct *vm; + void *objp = (void *)PAGE_ALIGN((unsigned long)object); + + vm = find_vm_area(objp); + if (!vm) + return false; + pr_cont(" vmalloc allocated at %pS\n", vm->caller); + return true; +} + #ifdef CONFIG_PROC_FS static void *s_start(struct 
seq_file *m, loff_t *pos) __acquires(&vmap_purge_lock) From bd34dcd4120d7e358baac9c22ef1321bd0c22079 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Wed, 9 Dec 2020 15:15:27 -0800 Subject: [PATCH 128/130] mm: Make mem_obj_dump() vmalloc() dumps include start and length This commit adds the starting address and number of pages to the vmalloc() information dumped by way of vmalloc_dump_obj(). Cc: Andrew Morton Cc: Joonsoo Kim Cc: Reported-by: Andrii Nakryiko Suggested-by: Vlastimil Babka Acked-by: Vlastimil Babka Tested-by: Naresh Kamboju Signed-off-by: Paul E. McKenney --- mm/vmalloc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mm/vmalloc.c b/mm/vmalloc.c index c274ea4f14ea..e3229ff627ea 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -3456,7 +3456,8 @@ bool vmalloc_dump_obj(void *object) vm = find_vm_area(objp); if (!vm) return false; - pr_cont(" vmalloc allocated at %pS\n", vm->caller); + pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n", + vm->nr_pages, (unsigned long)vm->addr, vm->caller); return true; } From b4b7914a6a73fc169fd1ce2fcd78a1d83d9528a9 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 8 Dec 2020 13:45:49 -0800 Subject: [PATCH 129/130] rcu: Make call_rcu() print mem_dump_obj() info for double-freed callback The debug-object double-free checks in __call_rcu() print out the RCU callback function, which is usually sufficient to track down the double free. However, all uses of things like queue_rcu_work() will have the same RCU callback function (rcu_work_rcufn() in this case), so a diagnostic message for a double queue_rcu_work() needs more than just the callback function. This commit therefore calls mem_dump_obj() to dump out any additional available information on the double-freed callback. Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Cc: Andrew Morton Cc: Reported-by: Andrii Nakryiko Tested-by: Naresh Kamboju Signed-off-by: Paul E. McKenney --- kernel/rcu/tree.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 40e5e3dd253e..84513c52d9b0 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2941,6 +2941,7 @@ static void check_cb_ovld(struct rcu_data *rdp) static void __call_rcu(struct rcu_head *head, rcu_callback_t func) { + static atomic_t doublefrees; unsigned long flags; struct rcu_data *rdp; bool was_alldone; @@ -2954,8 +2955,10 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func) * Use rcu:rcu_callback trace event to find the previous * time callback was passed to __call_rcu(). */ - WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pS()!!!\n", - head, head->func); + if (atomic_inc_return(&doublefrees) < 4) { + pr_err("%s(): Double-freed CB %p->%pS()!!! ", __func__, head, head->func); + mem_dump_obj(head); + } WRITE_ONCE(head->func, rcu_leak_callback); return; } From 3375efeddf6972df47df26a5b5c643189bd3c02a Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Tue, 8 Dec 2020 14:43:43 -0800 Subject: [PATCH 130/130] percpu_ref: Dump mem_dump_obj() info upon reference-count underflow Reference-count underflow for percpu_ref is detected in the RCU callback percpu_ref_switch_to_atomic_rcu(), and the resulting warning does not print anything allowing easy identification of which percpu_ref use case is underflowing. This is of course not normally a problem when developing a new percpu_ref use case because it is most likely that the problem resides in this new use case. 
However, when deploying a new kernel to a large set of servers, the underflow might well be a new corner case in any of the old percpu_ref use cases. This commit therefore calls mem_dump_obj() to dump out any additional available information on the underflowing percpu_ref instance. Cc: Ming Lei Cc: Jens Axboe Cc: Joonsoo Kim Reported-by: Andrii Nakryiko Tested-by: Naresh Kamboju Signed-off-by: Paul E. McKenney --- lib/percpu-refcount.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c index e59eda07305e..a1071cdefb5a 100644 --- a/lib/percpu-refcount.c +++ b/lib/percpu-refcount.c @@ -5,6 +5,7 @@ #include #include #include +#include #include /* @@ -168,6 +169,7 @@ static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu) struct percpu_ref_data, rcu); struct percpu_ref *ref = data->ref; unsigned long __percpu *percpu_count = percpu_count_ptr(ref); + static atomic_t underflows; unsigned long count = 0; int cpu; @@ -191,9 +193,13 @@ static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu) */ atomic_long_add((long)count - PERCPU_COUNT_BIAS, &data->count); - WARN_ONCE(atomic_long_read(&data->count) <= 0, - "percpu ref (%ps) <= 0 (%ld) after switching to atomic", - data->release, atomic_long_read(&data->count)); + if (WARN_ONCE(atomic_long_read(&data->count) <= 0, + "percpu ref (%ps) <= 0 (%ld) after switching to atomic", + data->release, atomic_long_read(&data->count)) && + atomic_inc_return(&underflows) < 4) { + pr_err("%s(): percpu_ref underflow", __func__); + mem_dump_obj(data); + } /* @ref is viewed as dead on all CPUs, send out switch confirmation */ percpu_ref_call_confirm_rcu(rcu);
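
As a quick illustration of the interfaces added by this series, a small built-in self-test could exercise mem_dump_obj() as follows. This demo is hypothetical and not part of any patch above; it assumes mem_dump_obj() is declared in <linux/mm.h> by the earlier patch in this series, and the file/function names are made up for illustration.

/*
 * Hypothetical built-in self-test (not part of this series) sketching the
 * pointer classes mem_dump_obj() distinguishes once these patches are applied.
 * Allocation failures are harmless here because mem_dump_obj(NULL) is handled.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static int __init mem_dump_obj_demo(void)
{
        u8 stackvar;
        void *kp = kmalloc(64, GFP_KERNEL);
        void *vp = vmalloc(PAGE_SIZE);

        /* Slab object: slab name, object start, data/pointer offsets. */
        pr_info("kmalloc pointer:");
        mem_dump_obj(kp);

        /* vmalloc region: page count, start address, and allocating caller. */
        pr_info("vmalloc pointer:");
        mem_dump_obj(vp);

        /* Special cases called out explicitly by this series. */
        pr_info("NULL pointer:");
        mem_dump_obj(NULL);
        pr_info("zero-size pointer:");
        mem_dump_obj(ZERO_SIZE_PTR);

        /*
         * On-stack variable: reported as a vmalloc region under
         * CONFIG_VMAP_STACK, otherwise as non-slab/vmalloc memory.
         */
        pr_info("stack pointer:");
        mem_dump_obj(&stackvar);

        kfree(kp);
        vfree(vp);
        return 0;
}
late_initcall(mem_dump_obj_demo);

Note that mem_dump_obj() uses pr_cont(), so each pr_info() above supplies the preamble for the line that follows. On a CONFIG_SLUB_DEBUG kernel booted with slub_debug=U (which sets SLAB_STORE_USER), the slab case additionally reports the allocation return address and, with CONFIG_STACKTRACE, the allocation stack trace; that extra provenance is what makes the call_rcu() double-free and percpu_ref underflow diagnostics in the last two patches useful for telling otherwise identical callbacks apart.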