include/linux/sched/mm.h: re-inline mmdrop()
As Peter points out, doing a CALL+RET for just the decrement is a bit silly.
Fixes: d70f2a14b7
("include/linux/sched/mm.h: uninline mmdrop_async(), etc")
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
hifive-unleashed-5.1
parent
7ed1c1901f
commit
d34bc48f82
|
@ -36,7 +36,18 @@ static inline void mmgrab(struct mm_struct *mm)
|
||||||
atomic_inc(&mm->mm_count);
|
atomic_inc(&mm->mm_count);
|
||||||
}
|
}
|
||||||
|
|
||||||
extern void mmdrop(struct mm_struct *mm);
|
extern void __mmdrop(struct mm_struct *mm);
|
||||||
|
|
||||||
|
static inline void mmdrop(struct mm_struct *mm)
|
||||||
|
{
|
||||||
|
/*
|
||||||
|
* The implicit full barrier implied by atomic_dec_and_test() is
|
||||||
|
* required by the membarrier system call before returning to
|
||||||
|
* user-space, after storing to rq->curr.
|
||||||
|
*/
|
||||||
|
if (unlikely(atomic_dec_and_test(&mm->mm_count)))
|
||||||
|
__mmdrop(mm);
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* mmget() - Pin the address space associated with a &struct mm_struct.
|
* mmget() - Pin the address space associated with a &struct mm_struct.
|
||||||
|
|
|
@ -592,7 +592,7 @@ static void check_mm(struct mm_struct *mm)
|
||||||
* is dropped: either by a lazy thread or by
|
* is dropped: either by a lazy thread or by
|
||||||
* mmput. Free the page directory and the mm.
|
* mmput. Free the page directory and the mm.
|
||||||
*/
|
*/
|
||||||
static void __mmdrop(struct mm_struct *mm)
|
void __mmdrop(struct mm_struct *mm)
|
||||||
{
|
{
|
||||||
BUG_ON(mm == &init_mm);
|
BUG_ON(mm == &init_mm);
|
||||||
mm_free_pgd(mm);
|
mm_free_pgd(mm);
|
||||||
|
@ -603,18 +603,7 @@ static void __mmdrop(struct mm_struct *mm)
|
||||||
put_user_ns(mm->user_ns);
|
put_user_ns(mm->user_ns);
|
||||||
free_mm(mm);
|
free_mm(mm);
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(__mmdrop);
|
||||||
void mmdrop(struct mm_struct *mm)
|
|
||||||
{
|
|
||||||
/*
|
|
||||||
* The implicit full barrier implied by atomic_dec_and_test() is
|
|
||||||
* required by the membarrier system call before returning to
|
|
||||||
* user-space, after storing to rq->curr.
|
|
||||||
*/
|
|
||||||
if (unlikely(atomic_dec_and_test(&mm->mm_count)))
|
|
||||||
__mmdrop(mm);
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL_GPL(mmdrop);
|
|
||||||
|
|
||||||
static void mmdrop_async_fn(struct work_struct *work)
|
static void mmdrop_async_fn(struct work_struct *work)
|
||||||
{
|
{
|
||||||
|
|
Loading…
Reference in New Issue