1
0
Fork 0

ia64/tlb: Eradicate tlb_migrate_finish() callback

Only ia64-sn2 uses this as an optimization, and there it is of
questionable correctness due to the mm_users==1 test.

Remove it entirely.

No change in behavior intended.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
hifive-unleashed-5.2
Peter Zijlstra 2019-02-19 14:38:37 +01:00 committed by Ingo Molnar
parent 0a8caf211b
commit 6455959819
8 changed files with 0 additions and 40 deletions

View File

@@ -101,16 +101,6 @@ changes occur:
 	translations for software managed TLB configurations.
 	The sparc64 port currently does this.
 
-6) ``void tlb_migrate_finish(struct mm_struct *mm)``
-
-	This interface is called at the end of an explicit
-	process migration. This interface provides a hook
-	to allow a platform to update TLB or context-specific
-	information for the address space.
-
-	The ia64 sn2 platform is one example of a platform
-	that uses this interface.
-
 Next, we have the cache flushing interfaces.  In general, when Linux
 is changing an existing virtual-->physical mapping to a new value,
 the sequence will be in one of the following forms::

View File

@@ -30,7 +30,6 @@ typedef void ia64_mv_irq_init_t (void);
 typedef void ia64_mv_send_ipi_t (int, int, int, int);
 typedef void ia64_mv_timer_interrupt_t (int, void *);
 typedef void ia64_mv_global_tlb_purge_t (struct mm_struct *, unsigned long, unsigned long, unsigned long);
-typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
 typedef u8 ia64_mv_irq_to_vector (int);
 typedef unsigned int ia64_mv_local_vector_to_irq (u8);
 typedef char *ia64_mv_pci_get_legacy_mem_t (struct pci_bus *);
@@ -79,11 +78,6 @@ machvec_noop (void)
 {
 }
 
-static inline void
-machvec_noop_mm (struct mm_struct *mm)
-{
-}
-
 static inline void
 machvec_noop_task (struct task_struct *task)
 {
@@ -96,7 +90,6 @@ machvec_noop_bus (struct pci_bus *bus)
 
 extern void machvec_setup (char **);
 extern void machvec_timer_interrupt (int, void *);
-extern void machvec_tlb_migrate_finish (struct mm_struct *);
 
 # if defined (CONFIG_IA64_HP_SIM)
 #  include <asm/machvec_hpsim.h>
@@ -124,7 +117,6 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
 # define platform_send_ipi		ia64_mv.send_ipi
 # define platform_timer_interrupt	ia64_mv.timer_interrupt
 # define platform_global_tlb_purge	ia64_mv.global_tlb_purge
-# define platform_tlb_migrate_finish	ia64_mv.tlb_migrate_finish
 # define platform_dma_init		ia64_mv.dma_init
 # define platform_dma_get_ops		ia64_mv.dma_get_ops
 # define platform_irq_to_vector	ia64_mv.irq_to_vector
@@ -167,7 +159,6 @@ struct ia64_machine_vector {
 	ia64_mv_send_ipi_t *send_ipi;
 	ia64_mv_timer_interrupt_t *timer_interrupt;
 	ia64_mv_global_tlb_purge_t *global_tlb_purge;
-	ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
 	ia64_mv_dma_init *dma_init;
 	ia64_mv_dma_get_ops *dma_get_ops;
 	ia64_mv_irq_to_vector *irq_to_vector;
@@ -206,7 +197,6 @@ struct ia64_machine_vector {
 	platform_send_ipi,			\
 	platform_timer_interrupt,		\
 	platform_global_tlb_purge,		\
-	platform_tlb_migrate_finish,		\
 	platform_dma_init,			\
 	platform_dma_get_ops,			\
 	platform_irq_to_vector,			\
@@ -270,9 +260,6 @@ extern const struct dma_map_ops *dma_get_ops(struct device *);
 #ifndef platform_global_tlb_purge
 # define platform_global_tlb_purge	ia64_global_tlb_purge /* default to architected version */
 #endif
-#ifndef platform_tlb_migrate_finish
-# define platform_tlb_migrate_finish	machvec_noop_mm
-#endif
 #ifndef platform_kernel_launch_event
 # define platform_kernel_launch_event	machvec_noop
 #endif

View File

@@ -34,7 +34,6 @@ extern ia64_mv_irq_init_t sn_irq_init;
 extern ia64_mv_send_ipi_t sn2_send_IPI;
 extern ia64_mv_timer_interrupt_t sn_timer_interrupt;
 extern ia64_mv_global_tlb_purge_t sn2_global_tlb_purge;
-extern ia64_mv_tlb_migrate_finish_t	sn_tlb_migrate_finish;
 extern ia64_mv_irq_to_vector sn_irq_to_vector;
 extern ia64_mv_local_vector_to_irq sn_local_vector_to_irq;
 extern ia64_mv_pci_get_legacy_mem_t sn_pci_get_legacy_mem;
@@ -77,7 +76,6 @@ extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus;
 #define platform_send_ipi		sn2_send_IPI
 #define platform_timer_interrupt	sn_timer_interrupt
 #define platform_global_tlb_purge	sn2_global_tlb_purge
-#define platform_tlb_migrate_finish	sn_tlb_migrate_finish
 #define platform_pci_fixup		sn_pci_fixup
 #define platform_inb			__sn_inb
 #define platform_inw			__sn_inw

View File

@@ -47,8 +47,6 @@
 #include <asm/tlbflush.h>
 #include <asm/machvec.h>
 
-#define tlb_migrate_finish(mm)	platform_tlb_migrate_finish(mm)
-
 #include <asm-generic/tlb.h>
 
 #endif /* _ASM_IA64_TLB_H */

View File

@@ -120,13 +120,6 @@ void sn_migrate(struct task_struct *task)
 		cpu_relax();
 }
 
-void sn_tlb_migrate_finish(struct mm_struct *mm)
-{
-	/* flush_tlb_mm is inefficient if more than 1 users of mm */
-	if (mm == current->mm && mm && atomic_read(&mm->mm_users) == 1)
-		flush_tlb_mm(mm);
-}
-
 static void
 sn2_ipi_flush_all_tlb(struct mm_struct *mm)
 {

View File

@@ -42,6 +42,5 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
 void update_mmu_cache(struct vm_area_struct *vma,
 		      unsigned long address, pte_t * pte);
-void tlb_migrate_finish(struct mm_struct *mm);
 
 #endif

View File

@@ -604,8 +604,4 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
 
 #endif /* CONFIG_MMU */
 
-#ifndef tlb_migrate_finish
-#define tlb_migrate_finish(mm) do {} while (0)
-#endif
-
 #endif /* _ASM_GENERIC__TLB_H */

View File

@@ -1151,7 +1151,6 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 		/* Need help from migration thread: drop lock and wait. */
 		task_rq_unlock(rq, p, &rf);
 		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
-		tlb_migrate_finish(p->mm);
 		return 0;
 	} else if (task_on_rq_queued(p)) {
 		/*