
srcu: Apply trivial callback lists to shrink Tiny SRCU

The rcu_segcblist structure provides quite a bit of functionality, and
Tiny SRCU needs almost none of it.  So this commit replaces Tiny SRCU's
uses of rcu_segcblist with a simple singly linked list with tail pointer.
This change significantly reduces Tiny SRCU's memory footprint, more
than making up for the growth caused by the creation of rcu_segcblist.c.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Paul E. McKenney 2017-05-04 14:29:16 -07:00
parent 5a0465e17a
commit 2464dd940e
4 changed files with 36 additions and 43 deletions
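
To make the commit message concrete, the following is a minimal user-space sketch (assumed illustrative code, not part of this commit and not the kernel's implementation) of a singly linked list with a tail pointer: O(1) enqueue at the tail, plus an O(1) detach of the whole list before invoking callbacks, mirroring what the srcu_drive_gp() hunk below does. All names here (struct cb, cb_list, cb_enqueue, cb_drain) are illustrative only.

#include <stddef.h>
#include <stdio.h>

/* A pending callback: singly linked via ->next, invoked via ->func. */
struct cb {
	struct cb *next;
	void (*func)(struct cb *);
};

/* Head pointer plus tail pointer (address of the last ->next slot). */
struct cb_list {
	struct cb *head;
	struct cb **tail;
};

static void cb_list_init(struct cb_list *l)
{
	l->head = NULL;
	l->tail = &l->head;	/* Empty list: tail points at head. */
}

/* O(1) enqueue at the tail, the pattern call_srcu() uses below. */
static void cb_enqueue(struct cb_list *l, struct cb *c,
		       void (*func)(struct cb *))
{
	c->func = func;
	c->next = NULL;
	*l->tail = c;		/* Link at the end... */
	l->tail = &c->next;	/* ...and advance the tail pointer. */
}

/* O(1) detach of the whole list, then invoke callbacks in order,
 * the pattern the reworked srcu_drive_gp() uses below. */
static void cb_drain(struct cb_list *l)
{
	struct cb *lh = l->head;

	l->head = NULL;
	l->tail = &l->head;
	while (lh) {
		struct cb *c = lh;

		lh = lh->next;
		c->func(c);
	}
}

static void hello(struct cb *c)
{
	printf("invoked callback at %p\n", (void *)c);
}

int main(void)
{
	struct cb_list list;
	struct cb a, b;

	cb_list_init(&list);
	cb_enqueue(&list, &a, hello);
	cb_enqueue(&list, &b, hello);
	cb_drain(&list);	/* Invokes &a first, then &b. */
	return 0;
}

Note that the empty-list invariant is head == NULL with tail == &head, which is exactly what the new WARN_ON() checks in cleanup_srcu_struct() below verify.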


@@ -33,9 +33,8 @@ struct srcu_struct {
 	u8 srcu_gp_waiting;		/* GP waiting for readers? */
 	struct swait_queue_head srcu_wq;
 					/* Last srcu_read_unlock() wakes GP. */
-	unsigned long srcu_gp_seq;	/* GP seq # for callback tagging. */
-	struct rcu_segcblist srcu_cblist;
-					/* Pending SRCU callbacks. */
+	struct rcu_head *srcu_cb_head;	/* Pending callbacks: Head. */
+	struct rcu_head **srcu_cb_tail;	/* Pending callbacks: Tail. */
 	struct work_struct srcu_work;	/* For driving grace periods. */
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map dep_map;
@@ -47,7 +46,7 @@ void srcu_drive_gp(struct work_struct *wp);
 #define __SRCU_STRUCT_INIT(name) \
 { \
 	.srcu_wq = __SWAIT_QUEUE_HEAD_INITIALIZER(name.srcu_wq), \
-	.srcu_cblist = RCU_SEGCBLIST_INITIALIZER(name.srcu_cblist), \
+	.srcu_cb_tail = &name.srcu_cb_head, \
 	.srcu_work = __WORK_INITIALIZER(name.srcu_work, srcu_drive_gp), \
 	__SRCU_DEP_MAP_INIT(name) \
 }


@@ -573,7 +573,7 @@ config RCU_STALL_COMMON
 	  making these warnings mandatory for the tree variants.
 
 config RCU_NEED_SEGCBLIST
-	def_bool ( TREE_RCU || PREEMPT_RCU || TINY_SRCU || TREE_SRCU )
+	def_bool ( TREE_RCU || PREEMPT_RCU || TREE_SRCU )
 
 config CONTEXT_TRACKING
 	bool


@@ -398,7 +398,7 @@ static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
 	if (test_type != SRCU_FLAVOR)
 		return;
 	*flags = 0;
-	*completed = sp->srcu_gp_seq;
+	*completed = sp->srcu_idx;
 	*gpnum = *completed;
 }


@@ -38,8 +38,8 @@ static int init_srcu_struct_fields(struct srcu_struct *sp)
 	sp->srcu_lock_nesting[0] = 0;
 	sp->srcu_lock_nesting[1] = 0;
 	init_swait_queue_head(&sp->srcu_wq);
-	sp->srcu_gp_seq = 0;
-	rcu_segcblist_init(&sp->srcu_cblist);
+	sp->srcu_cb_head = NULL;
+	sp->srcu_cb_tail = &sp->srcu_cb_head;
 	sp->srcu_gp_running = false;
 	sp->srcu_gp_waiting = false;
 	sp->srcu_idx = 0;
@@ -88,10 +88,10 @@ void cleanup_srcu_struct(struct srcu_struct *sp)
 {
 	WARN_ON(sp->srcu_lock_nesting[0] || sp->srcu_lock_nesting[1]);
 	flush_work(&sp->srcu_work);
-	WARN_ON(rcu_seq_state(sp->srcu_gp_seq));
 	WARN_ON(sp->srcu_gp_running);
 	WARN_ON(sp->srcu_gp_waiting);
-	WARN_ON(!rcu_segcblist_empty(&sp->srcu_cblist));
+	WARN_ON(sp->srcu_cb_head);
+	WARN_ON(&sp->srcu_cb_head != sp->srcu_cb_tail);
 }
 EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
@@ -117,52 +117,44 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 void srcu_drive_gp(struct work_struct *wp)
 {
 	int idx;
-	struct rcu_cblist ready_cbs;
-	struct srcu_struct *sp;
+	struct rcu_head *lh;
 	struct rcu_head *rhp;
+	struct srcu_struct *sp;
 
 	sp = container_of(wp, struct srcu_struct, srcu_work);
-	if (sp->srcu_gp_running || rcu_segcblist_empty(&sp->srcu_cblist))
+	if (sp->srcu_gp_running || !READ_ONCE(sp->srcu_cb_head))
 		return; /* Already running or nothing to do. */
 
-	/* Tag recently arrived callbacks and wait for readers. */
+	/* Remove recently arrived callbacks and wait for readers. */
 	WRITE_ONCE(sp->srcu_gp_running, true);
-	rcu_segcblist_accelerate(&sp->srcu_cblist,
-				 rcu_seq_snap(&sp->srcu_gp_seq));
-	rcu_seq_start(&sp->srcu_gp_seq);
+	local_irq_disable();
+	lh = sp->srcu_cb_head;
+	sp->srcu_cb_head = NULL;
+	sp->srcu_cb_tail = &sp->srcu_cb_head;
+	local_irq_enable();
 	idx = sp->srcu_idx;
 	WRITE_ONCE(sp->srcu_idx, !sp->srcu_idx);
 	WRITE_ONCE(sp->srcu_gp_waiting, true);  /* srcu_read_unlock() wakes! */
 	swait_event(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
 	WRITE_ONCE(sp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
-	rcu_seq_end(&sp->srcu_gp_seq);
 
-	/* Update callback list based on GP, and invoke ready callbacks. */
-	rcu_segcblist_advance(&sp->srcu_cblist,
-			      rcu_seq_current(&sp->srcu_gp_seq));
-	if (rcu_segcblist_ready_cbs(&sp->srcu_cblist)) {
-		rcu_cblist_init(&ready_cbs);
-		local_irq_disable();
-		rcu_segcblist_extract_done_cbs(&sp->srcu_cblist, &ready_cbs);
-		local_irq_enable();
-		rhp = rcu_cblist_dequeue(&ready_cbs);
-		for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
-			local_bh_disable();
-			rhp->func(rhp);
-			local_bh_enable();
-		}
-		local_irq_disable();
-		rcu_segcblist_insert_count(&sp->srcu_cblist, &ready_cbs);
-		local_irq_enable();
+	/* Invoke the callbacks we removed above. */
+	while (lh) {
+		rhp = lh;
+		lh = lh->next;
+		local_bh_disable();
+		rhp->func(rhp);
+		local_bh_enable();
 	}
-	WRITE_ONCE(sp->srcu_gp_running, false);
 
 	/*
-	 * If more callbacks, reschedule ourselves.  This can race with
-	 * a call_srcu() at interrupt level, but the ->srcu_gp_running
-	 * checks will straighten that out.
+	 * Enable rescheduling, and if there are more callbacks,
+	 * reschedule ourselves.  This can race with a call_srcu()
+	 * at interrupt level, but the ->srcu_gp_running checks will
+	 * straighten that out.
	 */
-	if (!rcu_segcblist_empty(&sp->srcu_cblist))
+	WRITE_ONCE(sp->srcu_gp_running, false);
+	if (READ_ONCE(sp->srcu_cb_head))
 		schedule_work(&sp->srcu_work);
 }
 EXPORT_SYMBOL_GPL(srcu_drive_gp);
@@ -171,14 +163,16 @@ EXPORT_SYMBOL_GPL(srcu_drive_gp);
  * Enqueue an SRCU callback on the specified srcu_struct structure,
  * initiating grace-period processing if it is not already running.
  */
-void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
+void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
 	       rcu_callback_t func)
 {
 	unsigned long flags;
 
-	head->func = func;
+	rhp->func = func;
+	rhp->next = NULL;
 	local_irq_save(flags);
-	rcu_segcblist_enqueue(&sp->srcu_cblist, head, false);
+	*sp->srcu_cb_tail = rhp;
+	sp->srcu_cb_tail = &rhp->next;
 	local_irq_restore(flags);
 	if (!READ_ONCE(sp->srcu_gp_running))
 		schedule_work(&sp->srcu_work);
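
For context on how the reworked enqueue path is used, here is a hedged usage sketch of the (unchanged) call_srcu() API as it applies to Tiny SRCU after this patch. The struct foo, foo_srcu, foo_free_cb(), and foo_retire() names are hypothetical illustrations, not part of this commit.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/srcu.h>

/* Hypothetical example structure, not part of this commit. */
struct foo {
	int data;
	struct rcu_head rh;
};

DEFINE_STATIC_SRCU(foo_srcu);

/* Invoked once all readers that might still see this foo have finished. */
static void foo_free_cb(struct rcu_head *rhp)
{
	struct foo *fp = container_of(rhp, struct foo, rh);

	kfree(fp);
}

/*
 * Retire a foo: after this commit, call_srcu() links fp->rh onto
 * ->srcu_cb_head through the ->srcu_cb_tail pointer and schedules
 * srcu_drive_gp() if no grace period is currently running.
 */
static void foo_retire(struct foo *fp)
{
	call_srcu(&foo_srcu, &fp->rh, foo_free_cb);
}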