Revert "mm, mmu_notifier: annotate mmu notifiers with blockable invalidate callbacks"
Revert 5ff7091f5a ("mm, mmu_notifier: annotate mmu notifiers with blockable invalidate callbacks").

The MMU_INVALIDATE_DOES_NOT_BLOCK flag was the only one used and it is no longer needed since 93065ac753 ("mm, oom: distinguish blockable mode for mmu notifiers"). We now have full support for per-range !blocking behavior, so we can drop the stop-gap workaround that the per-notifier flag was used for.

Link: http://lkml.kernel.org/r/20180827112623.8992-4-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 33490af3f5
commit 4e15a073a1
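For context before the diff: the per-range replacement works by passing a blockable mode into invalidate_range_start() and letting the callback fail with -EAGAIN, instead of having the notifier advertise a static flag. A minimal sketch of a notifier honoring that mode, assuming the post-93065ac753 callback signature; struct example_ctx, its lock, and example_zap_range() are hypothetical:

/* Illustrative sketch only -- not part of this patch. */
struct example_ctx {
	struct mmu_notifier mn;	/* embedded notifier */
	struct mutex lock;	/* protects hypothetical device mappings */
};

static int example_invalidate_range_start(struct mmu_notifier *mn,
					  struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end,
					  bool blockable)
{
	struct example_ctx *ctx = container_of(mn, struct example_ctx, mn);

	if (blockable) {
		mutex_lock(&ctx->lock);
	} else if (!mutex_trylock(&ctx->lock)) {
		/* Non-blocking caller (e.g. the oom reaper): back off. */
		return -EAGAIN;
	}

	example_zap_range(ctx, start, end);	/* hypothetical teardown */
	mutex_unlock(&ctx->lock);
	return 0;
}

The decision is made per invalidation range at call time, which is what lets the oom reaper make forward progress without a global "does not block" annotation.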
--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -77,7 +77,6 @@ static void do_remove(struct mmu_rb_handler *handler,
 static void handle_remove(struct work_struct *work);
 
 static const struct mmu_notifier_ops mn_opts = {
-	.flags = MMU_INVALIDATE_DOES_NOT_BLOCK,
 	.invalidate_range_start = mmu_notifier_range_start,
 };
 
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -427,7 +427,6 @@ static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
 }
 
 static const struct mmu_notifier_ops iommu_mn = {
-	.flags = MMU_INVALIDATE_DOES_NOT_BLOCK,
 	.release = mn_release,
 	.clear_flush_young = mn_clear_flush_young,
 	.invalidate_range = mn_invalidate_range,
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -273,7 +273,6 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 }
 
 static const struct mmu_notifier_ops intel_mmuops = {
-	.flags = MMU_INVALIDATE_DOES_NOT_BLOCK,
 	.release = intel_mm_release,
 	.change_pte = intel_change_pte,
 	.invalidate_range = intel_invalidate_range,
--- a/drivers/misc/sgi-gru/grutlbpurge.c
+++ b/drivers/misc/sgi-gru/grutlbpurge.c
@@ -261,7 +261,6 @@ static void gru_release(struct mmu_notifier *mn, struct mm_struct *mm)
 
 
 static const struct mmu_notifier_ops gru_mmuops = {
-	.flags = MMU_INVALIDATE_DOES_NOT_BLOCK,
 	.invalidate_range_start = gru_invalidate_range_start,
 	.invalidate_range_end = gru_invalidate_range_end,
 	.release = gru_release,
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -2,7 +2,6 @@
 #ifndef _LINUX_MMU_NOTIFIER_H
 #define _LINUX_MMU_NOTIFIER_H
 
-#include <linux/types.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/mm_types.h>
@@ -11,9 +10,6 @@
 struct mmu_notifier;
 struct mmu_notifier_ops;
 
-/* mmu_notifier_ops flags */
-#define MMU_INVALIDATE_DOES_NOT_BLOCK (0x01)
-
 #ifdef CONFIG_MMU_NOTIFIER
 
 /*
@@ -30,15 +26,6 @@ struct mmu_notifier_mm {
 };
 
 struct mmu_notifier_ops {
-	/*
-	 * Flags to specify behavior of callbacks for this MMU notifier.
-	 * Used to determine which context an operation may be called.
-	 *
-	 * MMU_INVALIDATE_DOES_NOT_BLOCK: invalidate_range_* callbacks do not
-	 *   block
-	 */
-	int flags;
-
 	/*
	 * Called either by mmu_notifier_unregister or when the mm is
	 * being destroyed by exit_mmap, always before all pages are
@@ -183,10 +170,6 @@ struct mmu_notifier_ops {
	 * Note that this function might be called with just a sub-range
	 * of what was passed to invalidate_range_start()/end(), if
	 * called between those functions.
-	 *
-	 * If this callback cannot block, and invalidate_range_{start,end}
-	 * cannot block, mmu_notifier_ops.flags should have
-	 * MMU_INVALIDATE_DOES_NOT_BLOCK set.
	 */
	void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
				 unsigned long start, unsigned long end);
@@ -241,7 +224,6 @@ extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
				  bool only_end);
 extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end);
-extern bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm);
 
 static inline void mmu_notifier_release(struct mm_struct *mm)
 {
@@ -495,11 +477,6 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
 {
 }
 
-static inline bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm)
-{
-	return false;
-}
-
 static inline void mmu_notifier_mm_init(struct mm_struct *mm)
 {
 }
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -247,37 +247,6 @@ void __mmu_notifier_invalidate_range(struct mm_struct *mm,
 }
 EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);
 
-/*
- * Must be called while holding mm->mmap_sem for either read or write.
- * The result is guaranteed to be valid until mm->mmap_sem is dropped.
- */
-bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm)
-{
-	struct mmu_notifier *mn;
-	int id;
-	bool ret = false;
-
-	WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));
-
-	if (!mm_has_notifiers(mm))
-		return ret;
-
-	id = srcu_read_lock(&srcu);
-	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
-		if (!mn->ops->invalidate_range &&
-		    !mn->ops->invalidate_range_start &&
-		    !mn->ops->invalidate_range_end)
-			continue;
-
-		if (!(mn->ops->flags & MMU_INVALIDATE_DOES_NOT_BLOCK)) {
-			ret = true;
-			break;
-		}
-	}
-	srcu_read_unlock(&srcu, id);
-	return ret;
-}
-
 static int do_mmu_notifier_register(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     int take_mmap_sem)
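With mm_has_blockable_invalidate_notifiers() removed, its single caller (the oom reaper) instead attempts the invalidation in non-blocking mode and backs off per range. A sketch of that caller pattern, assuming the mmu_notifier_invalidate_range_start_nonblock() helper added by 93065ac753; example_reap_range() and the elided unmap step are hypothetical:

/* Illustrative sketch only -- not part of this patch. */
static bool example_reap_range(struct mm_struct *mm,
			       unsigned long start, unsigned long end)
{
	/* Nonzero means some notifier could not proceed without blocking. */
	if (mmu_notifier_invalidate_range_start_nonblock(mm, start, end))
		return false;	/* caller retries the range later */

	/* ... unmap the range ... */

	mmu_notifier_invalidate_range_end(mm, start, end);
	return true;
}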
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -497,7 +497,6 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
 }
 
 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
-	.flags = MMU_INVALIDATE_DOES_NOT_BLOCK,
 	.invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
 	.invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
 	.clear_flush_young = kvm_mmu_notifier_clear_flush_young,