diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 56562ff01076..6747a3eddeb1 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -73,6 +73,12 @@
 #define KERNEL_START		_text
 #define KERNEL_END		_end
 
+#ifdef CONFIG_ARM64_USER_VA_BITS_52
+#define MAX_USER_VA_BITS	52
+#else
+#define MAX_USER_VA_BITS	VA_BITS
+#endif
+
 /*
  * KASAN requires 1/8th of the kernel virtual address space for the shadow
  * region. KASAN can bloat the stack significantly, so double the (minimum)
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 2a700f7b12d2..54a37660b8c9 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -80,11 +80,7 @@
 #define PGDIR_SHIFT		ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - CONFIG_PGTABLE_LEVELS)
 #define PGDIR_SIZE		(_AC(1, UL) << PGDIR_SHIFT)
 #define PGDIR_MASK		(~(PGDIR_SIZE-1))
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
-#define PTRS_PER_PGD		(1 << (52 - PGDIR_SHIFT))
-#else
-#define PTRS_PER_PGD		(1 << (VA_BITS - PGDIR_SHIFT))
-#endif
+#define PTRS_PER_PGD		(1 << (MAX_USER_VA_BITS - PGDIR_SHIFT))
 
 /*
  * Section address mask and size definitions.
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 538ecbc15067..bbecc6fe3e5b 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -20,11 +20,7 @@
 #define __ASM_PROCESSOR_H
 
 #define KERNEL_DS		UL(-1)
-#ifdef CONFIG_ARM64_USER_VA_BITS_52
-#define USER_DS			((UL(1) << 52) - 1)
-#else
-#define USER_DS			((UL(1) << VA_BITS) - 1)
-#endif /* CONFIG_ARM64_USER_VA_BITS_52 */
+#define USER_DS			((UL(1) << MAX_USER_VA_BITS) - 1)
 
 /*
  * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
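
The patch folds the two CONFIG_ARM64_USER_VA_BITS_52 branches into a single MAX_USER_VA_BITS symbol that PTRS_PER_PGD and USER_DS are then derived from. Below is a minimal standalone sketch (not kernel code) of what those derivations evaluate to, assuming a 4K-page, 4-level configuration where PGDIR_SHIFT is 39 and VA_BITS is 48; the exact values depend on the Kconfig page size and pgtable-level choices.

/*
 * Standalone illustration of the consolidated definitions, assuming
 * PGDIR_SHIFT == 39 and VA_BITS == 48 (4K pages, 4 levels). With
 * CONFIG_ARM64_USER_VA_BITS_52 defined, MAX_USER_VA_BITS becomes 52,
 * so PTRS_PER_PGD and USER_DS grow accordingly.
 */
#include <stdio.h>

#define VA_BITS			48	/* assumed Kconfig value */
#define PGDIR_SHIFT		39	/* assumed: 4K pages, 4 levels */

#ifdef CONFIG_ARM64_USER_VA_BITS_52
#define MAX_USER_VA_BITS	52
#else
#define MAX_USER_VA_BITS	VA_BITS
#endif

#define PTRS_PER_PGD	(1 << (MAX_USER_VA_BITS - PGDIR_SHIFT))
#define USER_DS		((1UL << MAX_USER_VA_BITS) - 1)

int main(void)
{
	/* 48-bit VA: 1 << (48 - 39) = 512 PGD entries;
	 * 52-bit VA: 1 << (52 - 39) = 8192 PGD entries. */
	printf("PTRS_PER_PGD = %d\n", PTRS_PER_PGD);
	/* Highest user virtual address covered by USER_DS,
	 * e.g. 0xffffffffffff for 48-bit. */
	printf("USER_DS      = 0x%lx\n", USER_DS);
	return 0;
}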