
x86/boot/e820: Rename 'e820_map' variables to 'e820_array'

In line with the rename to 'struct e820_array', harmonize the names of the
common e820 table variables as well:

 e820          =>  e820_array
 e820_saved    =>  e820_array_saved
 e820_map      =>  e820_array
 initial_e820  =>  e820_array_init

This makes the variable names more consistent and easier to grep for.

No change in functionality.

Cc: Alex Thorlton <athorlton@sgi.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Huang, Ying <ying.huang@intel.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul Jackson <pj@sgi.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rafael J. Wysocki <rjw@sisk.pl>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Wei Yang <richard.weiyang@gmail.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 2017-01-27 13:20:53 +01:00
parent e79d74d085
commit acd4c04872
19 changed files with 158 additions and 158 deletions


@@ -27,12 +27,12 @@ Offset Proto Name Meaning
1C0/020 ALL efi_info EFI 32 information (struct efi_info)
1E0/004 ALL alt_mem_k Alternative mem check, in KB
1E4/004 ALL scratch Scratch field for the kernel setup code
1E8/001 ALL e820_entries Number of entries in e820_map (below)
1E8/001 ALL e820_entries Number of entries in e820_array (below)
1E9/001 ALL eddbuf_entries Number of entries in eddbuf (below)
1EA/001 ALL edd_mbr_sig_buf_entries Number of entries in edd_mbr_sig_buffer
(below)
1EF/001 ALL sentinel Used to detect broken bootloaders
290/040 ALL edd_mbr_sig_buffer EDD MBR signatures
2D0/A00 ALL e820_map E820 memory map table
2D0/A00 ALL e820_array E820 memory map table
(array of struct e820_entry)
D00/1EC ALL eddbuf EDD data (array of struct edd_info)


@@ -917,7 +917,7 @@ static void add_e820ext(struct boot_params *params,
static efi_status_t setup_e820(struct boot_params *params,
struct setup_data *e820ext, u32 e820ext_size)
{
struct e820_entry *e820_map = &params->e820_map[0];
struct e820_entry *e820_array = &params->e820_array[0];
struct efi_info *efi = &params->efi_info;
struct e820_entry *prev = NULL;
u32 nr_entries;
@@ -982,7 +982,7 @@ static efi_status_t setup_e820(struct boot_params *params,
continue;
}
if (nr_entries == ARRAY_SIZE(params->e820_map)) {
if (nr_entries == ARRAY_SIZE(params->e820_array)) {
u32 need = (nr_desc - i) * sizeof(struct e820_entry) +
sizeof(struct setup_data);
@@ -990,18 +990,18 @@ static efi_status_t setup_e820(struct boot_params *params,
return EFI_BUFFER_TOO_SMALL;
/* boot_params map full, switch to e820 extended */
e820_map = (struct e820_entry *)e820ext->data;
e820_array = (struct e820_entry *)e820ext->data;
}
e820_map->addr = d->phys_addr;
e820_map->size = d->num_pages << PAGE_SHIFT;
e820_map->type = e820_type;
prev = e820_map++;
e820_array->addr = d->phys_addr;
e820_array->size = d->num_pages << PAGE_SHIFT;
e820_array->type = e820_type;
prev = e820_array++;
nr_entries++;
}
if (nr_entries > ARRAY_SIZE(params->e820_map)) {
u32 nr_e820ext = nr_entries - ARRAY_SIZE(params->e820_map);
if (nr_entries > ARRAY_SIZE(params->e820_array)) {
u32 nr_e820ext = nr_entries - ARRAY_SIZE(params->e820_array);
add_e820ext(params, e820ext, nr_e820ext);
nr_entries -= nr_e820ext;
@@ -1055,9 +1055,9 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
if (first) {
nr_desc = *map->buff_size / *map->desc_size;
if (nr_desc > ARRAY_SIZE(p->boot_params->e820_map)) {
if (nr_desc > ARRAY_SIZE(p->boot_params->e820_array)) {
u32 nr_e820ext = nr_desc -
ARRAY_SIZE(p->boot_params->e820_map);
ARRAY_SIZE(p->boot_params->e820_array);
status = alloc_e820ext(nr_e820ext, &p->e820ext,
&p->e820ext_size);


@@ -518,7 +518,7 @@ static unsigned long find_random_phys_addr(unsigned long minimum,
/* Verify potential e820 positions, appending to slots list. */
for (i = 0; i < boot_params->e820_entries; i++) {
process_e820_entry(&boot_params->e820_map[i], minimum,
process_e820_entry(&boot_params->e820_array[i], minimum,
image_size);
if (slot_area_index == MAX_SLOT_AREA) {
debug_putstr("Aborted e820 scan (slot_areas full)!\n");


@@ -21,7 +21,7 @@ static int detect_memory_e820(void)
{
int count = 0;
struct biosregs ireg, oreg;
struct e820_entry *desc = boot_params.e820_map;
struct e820_entry *desc = boot_params.e820_array;
static struct e820_entry buf; /* static so it is zeroed */
initregs(&ireg);
@@ -66,7 +66,7 @@ static int detect_memory_e820(void)
*desc++ = buf;
count++;
} while (ireg.ebx && count < ARRAY_SIZE(boot_params.e820_map));
} while (ireg.ebx && count < ARRAY_SIZE(boot_params.e820_array));
return boot_params.e820_entries = count;
}


@@ -4,8 +4,8 @@
#include <asm/e820/types.h>
/* see comment in arch/x86/kernel/e820.c */
extern struct e820_array *e820;
extern struct e820_array *e820_saved;
extern struct e820_array *e820_array;
extern struct e820_array *e820_array_saved;
extern unsigned long pci_mem_start;
@@ -13,7 +13,7 @@ extern int e820_any_mapped(u64 start, u64 end, unsigned type);
extern int e820_all_mapped(u64 start, u64 end, unsigned type);
extern void e820_add_region(u64 start, u64 size, int type);
extern void e820_print_map(char *who);
extern int sanitize_e820_map(struct e820_entry *biosmap, int max_nr_map, u32 *pnr_map);
extern int sanitize_e820_array(struct e820_entry *biosmap, int max_nr_map, u32 *pnr_map);
extern u64 e820_update_range(u64 start, u64 size, unsigned old_type, unsigned new_type);
extern u64 e820_remove_range(u64 start, u64 size, unsigned old_type, int checktype);
extern void update_e820(void);


@@ -21,7 +21,7 @@
*
* This allows for bootstrap/firmware quirks such as possible duplicate
* E820 entries that might need room in the same arrays, prior to the
* call to sanitize_e820_map() to remove duplicates. The allowance
* call to sanitize_e820_array() to remove duplicates. The allowance
* of three memory map entries per node is "enough" entries for
* the initial hardware platform motivating this mechanism to make
* use of additional EFI map entries. Future platforms may want


@@ -152,7 +152,7 @@ struct boot_params {
struct setup_header hdr; /* setup header */ /* 0x1f1 */
__u8 _pad7[0x290-0x1f1-sizeof(struct setup_header)];
__u32 edd_mbr_sig_buffer[EDD_MBR_SIG_MAX]; /* 0x290 */
struct e820_entry e820_map[E820MAX]; /* 0x2d0 */
struct e820_entry e820_array[E820MAX]; /* 0x2d0 */
__u8 _pad8[48]; /* 0xcd0 */
struct edd_info eddbuf[EDDMAXNR]; /* 0xd00 */
__u8 _pad9[276]; /* 0xeec */


@@ -512,7 +512,7 @@ static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
if (nr_e820_entries >= E820MAX)
return 1;
memcpy(&params->e820_map[nr_e820_entries], entry,
memcpy(&params->e820_array[nr_e820_entries], entry,
sizeof(struct e820_entry));
params->e820_entries++;
return 0;


@@ -2,7 +2,7 @@
* Handle the memory map.
* The functions here do the job until bootmem takes over.
*
* Getting sanitize_e820_map() in sync with i386 version by applying change:
* Getting sanitize_e820_array() in sync with i386 version by applying change:
* - Provisions for empty E820 memory regions (reported by certain BIOSes).
* Alex Achenbach <xela@slit.de>, December 2002.
* Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
@@ -27,11 +27,11 @@
#include <asm/cpufeature.h>
/*
* The e820 map is the map that gets modified e.g. with command line parameters
* The e820 table is the array that gets modified e.g. with command line parameters
* and that is also registered with modifications in the kernel resource tree
* with the iomem_resource as parent.
*
* The e820_saved is directly saved after the BIOS-provided memory map is
* The e820_array_saved is directly saved after the BIOS-provided memory map is
* copied. It doesn't get modified afterwards. It's registered for the
* /sys/firmware/memmap interface.
*
@@ -40,10 +40,10 @@
* user can e.g. boot the original kernel with mem=1G while still booting the
* next kernel with full memory.
*/
static struct e820_array initial_e820 __initdata;
static struct e820_array initial_e820_saved __initdata;
struct e820_array *e820 __refdata = &initial_e820;
struct e820_array *e820_saved __refdata = &initial_e820_saved;
static struct e820_array e820_array_init __initdata;
static struct e820_array initial_e820_array_saved __initdata;
struct e820_array *e820_array __refdata = &e820_array_init;
struct e820_array *e820_array_saved __refdata = &initial_e820_array_saved;
/* For PCI or other memory-mapped resources */
unsigned long pci_mem_start = 0xaeedbabe;
@@ -60,8 +60,8 @@ e820_any_mapped(u64 start, u64 end, unsigned type)
{
int i;
for (i = 0; i < e820->nr_map; i++) {
struct e820_entry *ei = &e820->map[i];
for (i = 0; i < e820_array->nr_map; i++) {
struct e820_entry *ei = &e820_array->map[i];
if (type && ei->type != type)
continue;
@@ -83,8 +83,8 @@ int __init e820_all_mapped(u64 start, u64 end, unsigned type)
{
int i;
for (i = 0; i < e820->nr_map; i++) {
struct e820_entry *ei = &e820->map[i];
for (i = 0; i < e820_array->nr_map; i++) {
struct e820_entry *ei = &e820_array->map[i];
if (type && ei->type != type)
continue;
@@ -130,7 +130,7 @@ static void __init __e820_add_region(struct e820_array *e820x, u64 start, u64 si
void __init e820_add_region(u64 start, u64 size, int type)
{
__e820_add_region(e820, start, size, type);
__e820_add_region(e820_array, start, size, type);
}
static void __init e820_print_type(u32 type)
@@ -166,12 +166,12 @@ void __init e820_print_map(char *who)
{
int i;
for (i = 0; i < e820->nr_map; i++) {
for (i = 0; i < e820_array->nr_map; i++) {
printk(KERN_INFO "%s: [mem %#018Lx-%#018Lx] ", who,
(unsigned long long) e820->map[i].addr,
(unsigned long long) e820_array->map[i].addr,
(unsigned long long)
(e820->map[i].addr + e820->map[i].size - 1));
e820_print_type(e820->map[i].type);
(e820_array->map[i].addr + e820_array->map[i].size - 1));
e820_print_type(e820_array->map[i].type);
printk(KERN_CONT "\n");
}
}
@@ -195,7 +195,7 @@ void __init e820_print_map(char *who)
* sanitizing succeeds the *pnr_map will be updated with the new
* number of valid entries (something no more than max_nr_map).
*
* The return value from sanitize_e820_map() is zero if it
* The return value from sanitize_e820_array() is zero if it
* successfully 'sanitized' the map entries passed in, and is -1
* if it did nothing, which can happen if either of (1) it was
* only passed one map entry, or (2) any of the input map entries
@@ -259,7 +259,7 @@ static int __init cpcompare(const void *a, const void *b)
return (ap->addr != ap->pbios->addr) - (bp->addr != bp->pbios->addr);
}
int __init sanitize_e820_map(struct e820_entry *biosmap, int max_nr_map,
int __init sanitize_e820_array(struct e820_entry *biosmap, int max_nr_map,
u32 *pnr_map)
{
static struct change_member change_point_list[2*E820_X_MAX] __initdata;
@@ -385,7 +385,7 @@ int __init sanitize_e820_map(struct e820_entry *biosmap, int max_nr_map,
return 0;
}
static int __init __append_e820_map(struct e820_entry *biosmap, int nr_map)
static int __init __append_e820_array(struct e820_entry *biosmap, int nr_map)
{
while (nr_map) {
u64 start = biosmap->addr;
@@ -414,13 +414,13 @@ static int __init __append_e820_map(struct e820_entry *biosmap, int nr_map)
* will have given us a memory map that we can use to properly
* set up memory. If we aren't, we'll fake a memory map.
*/
static int __init append_e820_map(struct e820_entry *biosmap, int nr_map)
static int __init append_e820_array(struct e820_entry *biosmap, int nr_map)
{
/* Only one memory region (or negative)? Ignore it */
if (nr_map < 2)
return -1;
return __append_e820_map(biosmap, nr_map);
return __append_e820_array(biosmap, nr_map);
}
static u64 __init __e820_update_range(struct e820_array *e820x, u64 start,
@@ -495,13 +495,13 @@ static u64 __init __e820_update_range(struct e820_array *e820x, u64 start,
u64 __init e820_update_range(u64 start, u64 size, unsigned old_type,
unsigned new_type)
{
return __e820_update_range(e820, start, size, old_type, new_type);
return __e820_update_range(e820_array, start, size, old_type, new_type);
}
static u64 __init e820_update_range_saved(u64 start, u64 size,
unsigned old_type, unsigned new_type)
{
return __e820_update_range(e820_saved, start, size, old_type,
return __e820_update_range(e820_array_saved, start, size, old_type,
new_type);
}
@@ -523,8 +523,8 @@ u64 __init e820_remove_range(u64 start, u64 size, unsigned old_type,
e820_print_type(old_type);
printk(KERN_CONT "\n");
for (i = 0; i < e820->nr_map; i++) {
struct e820_entry *ei = &e820->map[i];
for (i = 0; i < e820_array->nr_map; i++) {
struct e820_entry *ei = &e820_array->map[i];
u64 final_start, final_end;
u64 ei_end;
@@ -568,15 +568,15 @@ u64 __init e820_remove_range(u64 start, u64 size, unsigned old_type,
void __init update_e820(void)
{
if (sanitize_e820_map(e820->map, ARRAY_SIZE(e820->map), &e820->nr_map))
if (sanitize_e820_array(e820_array->map, ARRAY_SIZE(e820_array->map), &e820_array->nr_map))
return;
printk(KERN_INFO "e820: modified physical RAM map:\n");
e820_print_map("modified");
}
static void __init update_e820_saved(void)
static void __init update_e820_array_saved(void)
{
sanitize_e820_map(e820_saved->map, ARRAY_SIZE(e820_saved->map),
&e820_saved->nr_map);
sanitize_e820_array(e820_array_saved->map, ARRAY_SIZE(e820_array_saved->map),
&e820_array_saved->nr_map);
}
#define MAX_GAP_END 0x100000000ull
/*
@@ -586,12 +586,12 @@ static int __init e820_search_gap(unsigned long *gapstart,
unsigned long *gapsize)
{
unsigned long long last = MAX_GAP_END;
int i = e820->nr_map;
int i = e820_array->nr_map;
int found = 0;
while (--i >= 0) {
unsigned long long start = e820->map[i].addr;
unsigned long long end = start + e820->map[i].size;
unsigned long long start = e820_array->map[i].addr;
unsigned long long end = start + e820_array->map[i].size;
/*
* Since "last" is at most 4GB, we know we'll
@@ -650,7 +650,7 @@ __init void e820_setup_gap(void)
/*
* Called late during init, in free_initmem().
*
* Initial e820 and e820_saved are largish __initdata arrays.
* Initial e820 and e820_array_saved are largish __initdata arrays.
* Copy them to (usually much smaller) dynamically allocated area.
* This is done after all tweaks we ever do to them:
* all functions which modify them are __init functions,
@@ -661,23 +661,23 @@ __init void e820_reallocate_tables(void)
struct e820_array *n;
int size;
size = offsetof(struct e820_array, map) + sizeof(struct e820_entry) * e820->nr_map;
size = offsetof(struct e820_array, map) + sizeof(struct e820_entry) * e820_array->nr_map;
n = kmalloc(size, GFP_KERNEL);
BUG_ON(!n);
memcpy(n, e820, size);
e820 = n;
memcpy(n, e820_array, size);
e820_array = n;
size = offsetof(struct e820_array, map) + sizeof(struct e820_entry) * e820_saved->nr_map;
size = offsetof(struct e820_array, map) + sizeof(struct e820_entry) * e820_array_saved->nr_map;
n = kmalloc(size, GFP_KERNEL);
BUG_ON(!n);
memcpy(n, e820_saved, size);
e820_saved = n;
memcpy(n, e820_array_saved, size);
e820_array_saved = n;
}
/**
* Because of the size limitation of struct boot_params, only first
* 128 E820 memory entries are passed to kernel via
* boot_params.e820_map, others are passed via SETUP_E820_EXT node of
* boot_params.e820_array, others are passed via SETUP_E820_EXT node of
* linked list of struct setup_data, which is parsed here.
*/
void __init parse_e820_ext(u64 phys_addr, u32 data_len)
@@ -689,8 +689,8 @@ void __init parse_e820_ext(u64 phys_addr, u32 data_len)
sdata = early_memremap(phys_addr, data_len);
entries = sdata->len / sizeof(struct e820_entry);
extmap = (struct e820_entry *)(sdata->data);
__append_e820_map(extmap, entries);
sanitize_e820_map(e820->map, ARRAY_SIZE(e820->map), &e820->nr_map);
__append_e820_array(extmap, entries);
sanitize_e820_array(e820_array->map, ARRAY_SIZE(e820_array->map), &e820_array->nr_map);
early_memunmap(sdata, data_len);
printk(KERN_INFO "e820: extended physical RAM map:\n");
e820_print_map("extended");
@@ -709,8 +709,8 @@ void __init e820_mark_nosave_regions(unsigned long limit_pfn)
int i;
unsigned long pfn = 0;
for (i = 0; i < e820->nr_map; i++) {
struct e820_entry *ei = &e820->map[i];
for (i = 0; i < e820_array->nr_map; i++) {
struct e820_entry *ei = &e820_array->map[i];
if (pfn < PFN_UP(ei->addr))
register_nosave_region(pfn, PFN_UP(ei->addr));
@@ -734,8 +734,8 @@ static int __init e820_mark_nvs_memory(void)
{
int i;
for (i = 0; i < e820->nr_map; i++) {
struct e820_entry *ei = &e820->map[i];
for (i = 0; i < e820_array->nr_map; i++) {
struct e820_entry *ei = &e820_array->map[i];
if (ei->type == E820_NVS)
acpi_nvs_register(ei->addr, ei->size);
@@ -747,7 +747,7 @@ core_initcall(e820_mark_nvs_memory);
#endif
/*
* pre allocated 4k and reserved it in memblock and e820_saved
* pre allocated 4k and reserved it in memblock and e820_array_saved
*/
u64 __init early_reserve_e820(u64 size, u64 align)
{
@@ -756,8 +756,8 @@ u64 __init early_reserve_e820(u64 size, u64 align)
addr = __memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
if (addr) {
e820_update_range_saved(addr, size, E820_RAM, E820_RESERVED);
printk(KERN_INFO "e820: update e820_saved for early_reserve_e820\n");
update_e820_saved();
printk(KERN_INFO "e820: update e820_array_saved for early_reserve_e820\n");
update_e820_array_saved();
}
return addr;
@@ -782,8 +782,8 @@ static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
unsigned long last_pfn = 0;
unsigned long max_arch_pfn = MAX_ARCH_PFN;
for (i = 0; i < e820->nr_map; i++) {
struct e820_entry *ei = &e820->map[i];
for (i = 0; i < e820_array->nr_map; i++) {
struct e820_entry *ei = &e820_array->map[i];
unsigned long start_pfn;
unsigned long end_pfn;
@@ -874,7 +874,7 @@ static int __init parse_memmap_one(char *p)
*/
saved_max_pfn = e820_end_of_ram_pfn();
#endif
e820->nr_map = 0;
e820_array->nr_map = 0;
userdef = 1;
return 0;
}
@@ -921,8 +921,8 @@ early_param("memmap", parse_memmap_opt);
void __init finish_e820_parsing(void)
{
if (userdef) {
if (sanitize_e820_map(e820->map, ARRAY_SIZE(e820->map),
&e820->nr_map) < 0)
if (sanitize_e820_array(e820_array->map, ARRAY_SIZE(e820_array->map),
&e820_array->nr_map) < 0)
early_panic("Invalid user supplied memory map");
printk(KERN_INFO "e820: user-defined physical RAM map:\n");
@@ -1009,35 +1009,35 @@ void __init e820_reserve_resources(void)
struct resource *res;
u64 end;
res = alloc_bootmem(sizeof(struct resource) * e820->nr_map);
res = alloc_bootmem(sizeof(struct resource) * e820_array->nr_map);
e820_res = res;
for (i = 0; i < e820->nr_map; i++) {
end = e820->map[i].addr + e820->map[i].size - 1;
for (i = 0; i < e820_array->nr_map; i++) {
end = e820_array->map[i].addr + e820_array->map[i].size - 1;
if (end != (resource_size_t)end) {
res++;
continue;
}
res->name = e820_type_to_string(e820->map[i].type);
res->start = e820->map[i].addr;
res->name = e820_type_to_string(e820_array->map[i].type);
res->start = e820_array->map[i].addr;
res->end = end;
res->flags = e820_type_to_iomem_type(e820->map[i].type);
res->desc = e820_type_to_iores_desc(e820->map[i].type);
res->flags = e820_type_to_iomem_type(e820_array->map[i].type);
res->desc = e820_type_to_iores_desc(e820_array->map[i].type);
/*
* don't register the region that could be conflicted with
* pci device BAR resource and insert them later in
* pcibios_resource_survey()
*/
if (do_mark_busy(e820->map[i].type, res)) {
if (do_mark_busy(e820_array->map[i].type, res)) {
res->flags |= IORESOURCE_BUSY;
insert_resource(&iomem_resource, res);
}
res++;
}
for (i = 0; i < e820_saved->nr_map; i++) {
struct e820_entry *entry = &e820_saved->map[i];
for (i = 0; i < e820_array_saved->nr_map; i++) {
struct e820_entry *entry = &e820_array_saved->map[i];
firmware_map_add_early(entry->addr,
entry->addr + entry->size,
e820_type_to_string(entry->type));
@@ -1069,7 +1069,7 @@ void __init e820_reserve_resources_late(void)
struct resource *res;
res = e820_res;
for (i = 0; i < e820->nr_map; i++) {
for (i = 0; i < e820_array->nr_map; i++) {
if (!res->parent && res->end)
insert_resource_expand_to_fit(&iomem_resource, res);
res++;
@@ -1079,8 +1079,8 @@ void __init e820_reserve_resources_late(void)
* Try to bump up RAM regions to reasonable boundaries to
* avoid stolen RAM:
*/
for (i = 0; i < e820->nr_map; i++) {
struct e820_entry *entry = &e820->map[i];
for (i = 0; i < e820_array->nr_map; i++) {
struct e820_entry *entry = &e820_array->map[i];
u64 start, end;
if (entry->type != E820_RAM)
@@ -1110,11 +1110,11 @@ char *__init default_machine_specific_memory_setup(void)
* the next section from 1mb->appropriate_mem_k
*/
new_nr = boot_params.e820_entries;
sanitize_e820_map(boot_params.e820_map,
ARRAY_SIZE(boot_params.e820_map),
sanitize_e820_array(boot_params.e820_array,
ARRAY_SIZE(boot_params.e820_array),
&new_nr);
boot_params.e820_entries = new_nr;
if (append_e820_map(boot_params.e820_map, boot_params.e820_entries)
if (append_e820_array(boot_params.e820_array, boot_params.e820_entries)
< 0) {
u64 mem_size;
@@ -1128,7 +1128,7 @@ char *__init default_machine_specific_memory_setup(void)
who = "BIOS-e801";
}
e820->nr_map = 0;
e820_array->nr_map = 0;
e820_add_region(0, LOWMEMSIZE(), E820_RAM);
e820_add_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
}
@@ -1142,7 +1142,7 @@ void __init setup_memory_map(void)
char *who;
who = x86_init.resources.memory_setup();
memcpy(e820_saved, e820, sizeof(struct e820_array));
memcpy(e820_array_saved, e820_array, sizeof(struct e820_array));
printk(KERN_INFO "e820: BIOS-provided physical RAM map:\n");
e820_print_map(who);
}
@@ -1159,8 +1159,8 @@ void __init memblock_x86_fill(void)
*/
memblock_allow_resize();
for (i = 0; i < e820->nr_map; i++) {
struct e820_entry *ei = &e820->map[i];
for (i = 0; i < e820_array->nr_map; i++) {
struct e820_entry *ei = &e820_array->map[i];
end = ei->addr + ei->size;
if (end != (resource_size_t)end)


@@ -547,7 +547,7 @@ intel_graphics_stolen(int num, int slot, int func,
/* Mark this space as reserved */
e820_add_region(base, size, E820_RESERVED);
sanitize_e820_map(e820->map, ARRAY_SIZE(e820->map), &e820->nr_map);
sanitize_e820_array(e820_array->map, ARRAY_SIZE(e820_array->map), &e820_array->nr_map);
}
static void __init intel_graphics_quirks(int num, int slot, int func)


@@ -100,14 +100,14 @@ static int setup_e820_entries(struct boot_params *params)
{
unsigned int nr_e820_entries;
nr_e820_entries = e820_saved->nr_map;
nr_e820_entries = e820_array_saved->nr_map;
/* TODO: Pass entries more than E820MAX in bootparams setup data */
if (nr_e820_entries > E820MAX)
nr_e820_entries = E820MAX;
params->e820_entries = nr_e820_entries;
memcpy(&params->e820_map, &e820_saved->map,
memcpy(&params->e820_array, &e820_array_saved->map,
nr_e820_entries * sizeof(struct e820_entry));
return 0;
@@ -233,10 +233,10 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params,
nr_e820_entries = params->e820_entries;
for (i = 0; i < nr_e820_entries; i++) {
if (params->e820_map[i].type != E820_RAM)
if (params->e820_array[i].type != E820_RAM)
continue;
start = params->e820_map[i].addr;
end = params->e820_map[i].addr + params->e820_map[i].size - 1;
start = params->e820_array[i].addr;
end = params->e820_array[i].addr + params->e820_array[i].size - 1;
if ((start <= 0x100000) && end > 0x100000) {
mem_k = (end >> 10) - (0x100000 >> 10);


@@ -27,8 +27,8 @@ static void remove_e820_regions(struct resource *avail)
int i;
struct e820_entry *entry;
for (i = 0; i < e820->nr_map; i++) {
entry = &e820->map[i];
for (i = 0; i < e820_array->nr_map; i++) {
entry = &e820_array->map[i];
resource_clip(avail, entry->addr,
entry->addr + entry->size - 1);


@@ -458,8 +458,8 @@ static void __init e820_reserve_setup_data(void)
early_memunmap(data, sizeof(*data));
}
sanitize_e820_map(e820->map, ARRAY_SIZE(e820->map), &e820->nr_map);
memcpy(e820_saved, e820, sizeof(struct e820_array));
sanitize_e820_array(e820_array->map, ARRAY_SIZE(e820_array->map), &e820_array->nr_map);
memcpy(e820_array_saved, e820_array, sizeof(struct e820_array));
printk(KERN_INFO "extended physical RAM map:\n");
e820_print_map("reserve setup_data");
}
@@ -763,7 +763,7 @@ static void __init trim_bios_range(void)
*/
e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
sanitize_e820_map(e820->map, ARRAY_SIZE(e820->map), &e820->nr_map);
sanitize_e820_array(e820_array->map, ARRAY_SIZE(e820_array->map), &e820_array->nr_map);
}
/* called before trim_bios_range() to spare extra sanitize */
@@ -1026,7 +1026,7 @@ void __init setup_arch(char **cmdline_p)
early_dump_pci_devices();
#endif
/* update the e820_saved too */
/* update the e820_array_saved too */
e820_reserve_setup_data();
finish_e820_parsing();
@@ -1056,7 +1056,7 @@ void __init setup_arch(char **cmdline_p)
if (ppro_with_ram_bug()) {
e820_update_range(0x70000000ULL, 0x40000ULL, E820_RAM,
E820_RESERVED);
sanitize_e820_map(e820->map, ARRAY_SIZE(e820->map), &e820->nr_map);
sanitize_e820_array(e820_array->map, ARRAY_SIZE(e820_array->map), &e820_array->nr_map);
printk(KERN_INFO "fixed physical RAM map:\n");
e820_print_map("bad_ppro");
}


@@ -188,12 +188,12 @@ static int tboot_setup_sleep(void)
tboot->num_mac_regions = 0;
for (i = 0; i < e820->nr_map; i++) {
if ((e820->map[i].type != E820_RAM)
&& (e820->map[i].type != E820_RESERVED_KERN))
for (i = 0; i < e820_array->nr_map; i++) {
if ((e820_array->map[i].type != E820_RAM)
&& (e820_array->map[i].type != E820_RESERVED_KERN))
continue;
add_mac_region(e820->map[i].addr, e820->map[i].size);
add_mac_region(e820_array->map[i].addr, e820_array->map[i].size);
}
tboot->acpi_sinfo.kernel_s3_resume_vector =


@@ -1178,9 +1178,9 @@ static __init char *lguest_memory_setup(void)
* The Linux bootloader header contains an "e820" memory map: the
* Launcher populated the first entry with our memory limit.
*/
e820_add_region(boot_params.e820_map[0].addr,
boot_params.e820_map[0].size,
boot_params.e820_map[0].type);
e820_add_region(boot_params.e820_array[0].addr,
boot_params.e820_array[0].size,
boot_params.e820_array[0].type);
/* This string is for the boot messages. */
return "LGUEST";


@@ -167,7 +167,7 @@ static void __init do_add_efi_memmap(void)
}
e820_add_region(start, size, e820_type);
}
sanitize_e820_map(e820->map, ARRAY_SIZE(e820->map), &e820->nr_map);
sanitize_e820_array(e820_array->map, ARRAY_SIZE(e820_array->map), &e820_array->nr_map);
}
int __init efi_memblock_x86_reserve_range(void)


@@ -232,7 +232,7 @@ static int get_e820_md5(struct e820_array *map, void *buf)
static void hibernation_e820_save(void *buf)
{
get_e820_md5(e820_saved, buf);
get_e820_md5(e820_array_saved, buf);
}
static bool hibernation_e820_mismatch(void *buf)
@@ -245,7 +245,7 @@ static bool hibernation_e820_mismatch(void *buf)
if (!memcmp(result, buf, MD5_DIGEST_SIZE))
return false;
ret = get_e820_md5(e820_saved, result);
ret = get_e820_md5(e820_array_saved, result);
if (ret)
return true;


@@ -41,8 +41,8 @@ struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
unsigned long xen_released_pages;
/* E820 map used during setting up memory. */
static struct e820_entry xen_e820_map[E820_X_MAX] __initdata;
static u32 xen_e820_map_entries __initdata;
static struct e820_entry xen_e820_array[E820_X_MAX] __initdata;
static u32 xen_e820_array_entries __initdata;
/*
* Buffer used to remap identity mapped pages. We only need the virtual space.
@@ -198,11 +198,11 @@ void __init xen_inv_extra_mem(void)
*/
static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
{
const struct e820_entry *entry = xen_e820_map;
const struct e820_entry *entry = xen_e820_array;
unsigned int i;
unsigned long done = 0;
for (i = 0; i < xen_e820_map_entries; i++, entry++) {
for (i = 0; i < xen_e820_array_entries; i++, entry++) {
unsigned long s_pfn;
unsigned long e_pfn;
@@ -457,7 +457,7 @@ static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
{
phys_addr_t start = 0;
unsigned long ret_val = 0;
const struct e820_entry *entry = xen_e820_map;
const struct e820_entry *entry = xen_e820_array;
int i;
/*
@@ -471,9 +471,9 @@ static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
* example) the DMI tables in a reserved region that begins on
* a non-page boundary.
*/
for (i = 0; i < xen_e820_map_entries; i++, entry++) {
for (i = 0; i < xen_e820_array_entries; i++, entry++) {
phys_addr_t end = entry->addr + entry->size;
if (entry->type == E820_RAM || i == xen_e820_map_entries - 1) {
if (entry->type == E820_RAM || i == xen_e820_array_entries - 1) {
unsigned long start_pfn = PFN_DOWN(start);
unsigned long end_pfn = PFN_UP(end);
@@ -601,10 +601,10 @@ static void __init xen_align_and_add_e820_region(phys_addr_t start,
static void __init xen_ignore_unusable(void)
{
struct e820_entry *entry = xen_e820_map;
struct e820_entry *entry = xen_e820_array;
unsigned int i;
for (i = 0; i < xen_e820_map_entries; i++, entry++) {
for (i = 0; i < xen_e820_array_entries; i++, entry++) {
if (entry->type == E820_UNUSABLE)
entry->type = E820_RAM;
}
@@ -620,9 +620,9 @@ bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
return false;
end = start + size;
entry = xen_e820_map;
entry = xen_e820_array;
for (mapcnt = 0; mapcnt < xen_e820_map_entries; mapcnt++) {
for (mapcnt = 0; mapcnt < xen_e820_array_entries; mapcnt++) {
if (entry->type == E820_RAM && entry->addr <= start &&
(entry->addr + entry->size) >= end)
return false;
@@ -645,9 +645,9 @@ phys_addr_t __init xen_find_free_area(phys_addr_t size)
{
unsigned mapcnt;
phys_addr_t addr, start;
struct e820_entry *entry = xen_e820_map;
struct e820_entry *entry = xen_e820_array;
for (mapcnt = 0; mapcnt < xen_e820_map_entries; mapcnt++, entry++) {
for (mapcnt = 0; mapcnt < xen_e820_array_entries; mapcnt++, entry++) {
if (entry->type != E820_RAM || entry->size < size)
continue;
start = entry->addr;
@@ -750,8 +750,8 @@ char * __init xen_memory_setup(void)
max_pfn = min(max_pfn, xen_start_info->nr_pages);
mem_end = PFN_PHYS(max_pfn);
memmap.nr_entries = ARRAY_SIZE(xen_e820_map);
set_xen_guest_handle(memmap.buffer, xen_e820_map);
memmap.nr_entries = ARRAY_SIZE(xen_e820_array);
set_xen_guest_handle(memmap.buffer, xen_e820_array);
op = xen_initial_domain() ?
XENMEM_machine_memory_map :
@@ -760,16 +760,16 @@ char * __init xen_memory_setup(void)
if (rc == -ENOSYS) {
BUG_ON(xen_initial_domain());
memmap.nr_entries = 1;
xen_e820_map[0].addr = 0ULL;
xen_e820_map[0].size = mem_end;
xen_e820_array[0].addr = 0ULL;
xen_e820_array[0].size = mem_end;
/* 8MB slack (to balance backend allocations). */
xen_e820_map[0].size += 8ULL << 20;
xen_e820_map[0].type = E820_RAM;
xen_e820_array[0].size += 8ULL << 20;
xen_e820_array[0].type = E820_RAM;
rc = 0;
}
BUG_ON(rc);
BUG_ON(memmap.nr_entries == 0);
xen_e820_map_entries = memmap.nr_entries;
xen_e820_array_entries = memmap.nr_entries;
/*
* Xen won't allow a 1:1 mapping to be created to UNUSABLE
@@ -783,8 +783,8 @@ char * __init xen_memory_setup(void)
xen_ignore_unusable();
/* Make sure the Xen-supplied memory map is well-ordered. */
sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
&xen_e820_map_entries);
sanitize_e820_array(xen_e820_array, ARRAY_SIZE(xen_e820_array),
&xen_e820_array_entries);
max_pages = xen_get_max_pages();
@@ -811,13 +811,13 @@ char * __init xen_memory_setup(void)
extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
extra_pages, max_pages - max_pfn);
i = 0;
addr = xen_e820_map[0].addr;
size = xen_e820_map[0].size;
while (i < xen_e820_map_entries) {
addr = xen_e820_array[0].addr;
size = xen_e820_array[0].size;
while (i < xen_e820_array_entries) {
bool discard = false;
chunk_size = size;
type = xen_e820_map[i].type;
type = xen_e820_array[i].type;
if (type == E820_RAM) {
if (addr < mem_end) {
@@ -840,9 +840,9 @@ char * __init xen_memory_setup(void)
size -= chunk_size;
if (size == 0) {
i++;
if (i < xen_e820_map_entries) {
addr = xen_e820_map[i].addr;
size = xen_e820_map[i].size;
if (i < xen_e820_array_entries) {
addr = xen_e820_array[i].addr;
size = xen_e820_array[i].size;
}
}
}
@@ -861,7 +861,7 @@ char * __init xen_memory_setup(void)
e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
E820_RESERVED);
sanitize_e820_map(e820->map, ARRAY_SIZE(e820->map), &e820->nr_map);
sanitize_e820_array(e820_array->map, ARRAY_SIZE(e820_array->map), &e820_array->nr_map);
/*
* Check whether the kernel itself conflicts with the target E820 map.
@@ -923,21 +923,21 @@ char * __init xen_auto_xlated_memory_setup(void)
int i;
int rc;
memmap.nr_entries = ARRAY_SIZE(xen_e820_map);
set_xen_guest_handle(memmap.buffer, xen_e820_map);
memmap.nr_entries = ARRAY_SIZE(xen_e820_array);
set_xen_guest_handle(memmap.buffer, xen_e820_array);
rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
if (rc < 0)
panic("No memory map (%d)\n", rc);
xen_e820_map_entries = memmap.nr_entries;
xen_e820_array_entries = memmap.nr_entries;
sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
&xen_e820_map_entries);
sanitize_e820_array(xen_e820_array, ARRAY_SIZE(xen_e820_array),
&xen_e820_array_entries);
for (i = 0; i < xen_e820_map_entries; i++)
e820_add_region(xen_e820_map[i].addr, xen_e820_map[i].size,
xen_e820_map[i].type);
for (i = 0; i < xen_e820_array_entries; i++)
e820_add_region(xen_e820_array[i].addr, xen_e820_array[i].size,
xen_e820_array[i].type);
/* Remove p2m info, it is not needed. */
xen_start_info->mfn_list = 0;


@@ -3339,7 +3339,7 @@ int main(int argc, char *argv[])
* simple, single region.
*/
boot->e820_entries = 1;
boot->e820_map[0] = ((struct e820_entry) { 0, mem, E820_RAM });
boot->e820_array[0] = ((struct e820_entry) { 0, mem, E820_RAM });
/*
* The boot header contains a command line pointer: we put the command
* line after the boot header.