diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 9250f14be8ef..f867bba893ca 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -287,15 +287,15 @@ static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
 
 int prepare_hugepage_range(unsigned long addr, unsigned long len)
 {
-	int err;
+	int err = 0;
 
 	if ( (addr+len) < addr )
 		return -EINVAL;
 
-	if ((addr + len) < 0x100000000UL)
+	if (addr < 0x100000000UL)
 		err = open_low_hpage_areas(current->mm,
 					  LOW_ESID_MASK(addr, len));
-	else
+	if ((addr + len) >= 0x100000000UL)
 		err = open_high_hpage_areas(current->mm,
 					   HTLB_AREA_MASK(addr, len));
 	if (err) {
diff --git a/include/asm-powerpc/page_64.h b/include/asm-powerpc/page_64.h
index 1e6e7846824f..58a3dd9a79ec 100644
--- a/include/asm-powerpc/page_64.h
+++ b/include/asm-powerpc/page_64.h
@@ -135,9 +135,9 @@ extern unsigned int HPAGE_SHIFT;
 
 #define in_hugepage_area(context, addr) \
 	(cpu_has_feature(CPU_FTR_16M_PAGE) && \
-	 ( ((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) || \
-	   ( ((addr) < 0x100000000L) && \
-	     ((1 << GET_ESID(addr)) & (context).low_htlb_areas) ) ) )
+	 ( ( (addr) >= 0x100000000UL) \
+	   ? ((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) \
+	   : ((1 << GET_ESID(addr)) & (context).low_htlb_areas) ) )
 
 #else /* !CONFIG_HUGETLB_PAGE */
 
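
A note on the first hunk, for reviewers: prepare_hugepage_range() used to treat the low and high regions as mutually exclusive, so a mapping that straddles the 4GB boundary only opened the high hugepage areas. The patch turns the if/else into two independent tests so both halves get opened. The standalone userspace sketch below mirrors that control flow; the two open_* helpers are hypothetical stand-ins, not the kernel's open_low_hpage_areas()/open_high_hpage_areas(), and unsigned long long is used so the 4GB constant fits on 32-bit hosts.

/*
 * Standalone sketch of the patched prepare_hugepage_range() control flow.
 * The open_* helpers only record which half was requested; they are not
 * the kernel implementations.
 */
#include <stdio.h>

#define BOUNDARY_4GB 0x100000000ULL

static int open_low_hpage_areas(unsigned long long addr, unsigned long long len)
{
	printf("open low areas for  [%#llx, %#llx)\n", addr, addr + len);
	return 0;
}

static int open_high_hpage_areas(unsigned long long addr, unsigned long long len)
{
	printf("open high areas for [%#llx, %#llx)\n", addr, addr + len);
	return 0;
}

static int prepare_hugepage_range(unsigned long long addr, unsigned long long len)
{
	int err = 0;

	if ((addr + len) < addr)	/* wrap-around check, as in the patch */
		return -1;

	/* Independent tests: a range that starts below 4GB and ends at or
	 * above it takes BOTH branches, which the old if/else never did. */
	if (addr < BOUNDARY_4GB)
		err = open_low_hpage_areas(addr, len);
	if ((addr + len) >= BOUNDARY_4GB)
		err = open_high_hpage_areas(addr, len);

	return err;
}

int main(void)
{
	prepare_hugepage_range(0xf0000000ULL, 0x20000000ULL);	/* straddles 4GB */
	return 0;
}

As in the patched kernel code, an error from the low-area call can be overwritten by the high-area call's return value; the sketch keeps that behaviour rather than changing it.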