x86, memblock: Remove __memblock_x86_find_in_range_size()

Fold it into memblock_x86_find_in_range_size(), and change bad_addr_size()
to check_with_memblock_reserved_size().

So the whole memblock_x86_find_in_range_size() code is more readable.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <4CAA4DEC.4000401@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
This commit is contained in:
Yinghai Lu 2010-10-04 14:58:04 -07:00 committed by H. Peter Anvin
parent f1af98c762
commit 16c36f743b

View file

@@ -8,7 +8,7 @@
#include <linux/range.h>
/* Check for already reserved areas */
static inline bool __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
static bool __init check_with_memblock_reserved_size(u64 *addrp, u64 *sizep, u64 align)
{
struct memblock_region *r;
u64 addr = *addrp, last;
@@ -30,7 +30,7 @@ again:
goto again;
}
if (last <= (r->base + r->size) && addr >= r->base) {
(*sizep)++;
*sizep = 0;
return false;
}
}
@@ -41,29 +41,6 @@ again:
return changed;
}
/*
 * NOTE(review): this is the helper that the commit above removes; its body
 * is folded inline into memblock_x86_find_in_range_size() (see the hunk
 * further down).
 *
 * Find the first align-rounded free address within [ei_start, ei_last) that
 * is >= start, skipping over already-reserved memblock areas via
 * bad_addr_size().  On success the usable size is stored in *sizep and the
 * address is returned; MEMBLOCK_ERROR is returned when no suitable range
 * exists in this region.
 */
static u64 __init __memblock_x86_find_in_range_size(u64 ei_start, u64 ei_last, u64 start,
u64 *sizep, u64 align)
{
u64 addr, last;
/* First candidate: the align boundary inside the region, but not below start. */
addr = round_up(ei_start, align);
if (addr < start)
addr = round_up(start, align);
if (addr >= ei_last)
goto out;
/* Tentatively claim everything up to the region end ... */
*sizep = ei_last - addr;
/* ... then let bad_addr_size() advance addr / shrink *sizep past reserved areas. */
while (bad_addr_size(&addr, sizep, align) && addr + *sizep <= ei_last)
;
last = addr + *sizep;
if (last > ei_last)
goto out;
return addr;
out:
return MEMBLOCK_ERROR;
}
/*
* Find next free range after start, and size is returned in *sizep
*/
@@ -76,10 +53,16 @@ u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
u64 ei_last = ei_start + r->size;
u64 addr;
addr = __memblock_x86_find_in_range_size(ei_start, ei_last, start,
sizep, align);
addr = round_up(ei_start, align);
if (addr < start)
addr = round_up(start, align);
if (addr >= ei_last)
continue;
*sizep = ei_last - addr;
while (check_with_memblock_reserved_size(&addr, sizep, align))
;
if (addr != MEMBLOCK_ERROR)
if (*sizep)
return addr;
}