author	Ingo Molnar <mingo@kernel.org>	2017-01-27 13:54:38 +0100
committer	Ingo Molnar <mingo@kernel.org>	2017-01-28 09:33:16 +0100
commit	61a50101638254d38e3f4281265b44de0f2cba4e (patch)
tree	c57b88b903c3b5d01b2742b63e02e98aa1d91225 /arch
parent	acd4c048728814505fae8e224cf9074bd1ad291e (diff)
x86/boot/e820: Rename everything to e820_table
No change in functionality.

Cc: Alex Thorlton <athorlton@sgi.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Huang, Ying <ying.huang@intel.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul Jackson <pj@sgi.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rafael J. Wysocki <rjw@sisk.pl>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Wei Yang <richard.weiyang@gmail.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
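In short: the top-level E820 container type struct e820_array becomes struct e820_table, and its two global instances are renamed to match. A condensed before/after, pulled from the header hunks below (e820/types.h and e820/api.h), with nothing else changed:

	/* Before: */
	struct e820_array {
		__u32 nr_map;
		struct e820_entry map[E820_X_MAX];
	};
	extern struct e820_array *e820_array;
	extern struct e820_array *e820_array_saved;

	/* After: */
	struct e820_table {
		__u32 nr_map;
		struct e820_entry map[E820_X_MAX];
	};
	extern struct e820_table *e820_table;
	extern struct e820_table *e820_table_saved;

The boot_params field (boot_params.e820_array -> boot_params.e820_table) and the helpers sanitize_e820_array()/__append_e820_array() are renamed the same way; every hunk in the diff is a one-for-one substitution, which is why the insertion and deletion counts match.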
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/boot/compressed/eboot.c	22
-rw-r--r--	arch/x86/boot/compressed/kaslr.c	2
-rw-r--r--	arch/x86/boot/memory.c	4
-rw-r--r--	arch/x86/include/asm/e820/api.h	6
-rw-r--r--	arch/x86/include/asm/e820/types.h	4
-rw-r--r--	arch/x86/include/uapi/asm/bootparam.h	2
-rw-r--r--	arch/x86/kernel/crash.c	2
-rw-r--r--	arch/x86/kernel/e820.c	152
-rw-r--r--	arch/x86/kernel/early-quirks.c	2
-rw-r--r--	arch/x86/kernel/kexec-bzimage64.c	10
-rw-r--r--	arch/x86/kernel/resource.c	4
-rw-r--r--	arch/x86/kernel/setup.c	10
-rw-r--r--	arch/x86/kernel/tboot.c	8
-rw-r--r--	arch/x86/lguest/boot.c	6
-rw-r--r--	arch/x86/platform/efi/efi.c	2
-rw-r--r--	arch/x86/power/hibernate_64.c	8
-rw-r--r--	arch/x86/xen/setup.c	76
17 files changed, 160 insertions, 160 deletions
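To see the renamed access pattern outside the kernel tree, a minimal self-contained userland sketch follows; the struct shapes mirror the patched headers, but E820_X_MAX, the E820_TYPE_RAM constant and the sample entries are illustrative stand-ins, not values taken from this patch:

	#include <stdio.h>

	/* Shapes mirror arch/x86/include/asm/e820/types.h after the rename;
	   the constants and sample data below are made up for illustration. */
	#define E820_X_MAX	128
	#define E820_TYPE_RAM	1

	struct e820_entry {
		unsigned long long	addr;
		unsigned long long	size;
		unsigned int		type;
	};

	struct e820_table {
		unsigned int		nr_map;
		struct e820_entry	map[E820_X_MAX];
	};

	static struct e820_table table = {
		.nr_map = 2,
		.map = {
			{ 0x0000000, 0x009fc00, E820_TYPE_RAM },
			{ 0x0100000, 0x7ff0000, E820_TYPE_RAM },
		},
	};
	static struct e820_table *e820_table = &table;

	int main(void)
	{
		/* The canonical loop this patch touches all over arch/x86: */
		for (unsigned int i = 0; i < e820_table->nr_map; i++) {
			struct e820_entry *ei = &e820_table->map[i];

			printf("[mem %#018llx-%#018llx] type %u\n",
			       ei->addr, ei->addr + ei->size - 1, ei->type);
		}
		return 0;
	}

The output format loosely mirrors e820_print_map() in the e820.c hunks; the point is only that nr_map and map[] keep their meaning, and that solely the containing type and variable names change.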
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index b5cc1e72c63a..04c406f9aee3 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -917,7 +917,7 @@ static void add_e820ext(struct boot_params *params,
static efi_status_t setup_e820(struct boot_params *params,
struct setup_data *e820ext, u32 e820ext_size)
{
- struct e820_entry *e820_array = &params->e820_array[0];
+ struct e820_entry *e820_table = &params->e820_table[0];
struct efi_info *efi = &params->efi_info;
struct e820_entry *prev = NULL;
u32 nr_entries;
@@ -982,7 +982,7 @@ static efi_status_t setup_e820(struct boot_params *params,
continue;
}
- if (nr_entries == ARRAY_SIZE(params->e820_array)) {
+ if (nr_entries == ARRAY_SIZE(params->e820_table)) {
u32 need = (nr_desc - i) * sizeof(struct e820_entry) +
sizeof(struct setup_data);
@@ -990,18 +990,18 @@ static efi_status_t setup_e820(struct boot_params *params,
return EFI_BUFFER_TOO_SMALL;
/* boot_params map full, switch to e820 extended */
- e820_array = (struct e820_entry *)e820ext->data;
+ e820_table = (struct e820_entry *)e820ext->data;
}
- e820_array->addr = d->phys_addr;
- e820_array->size = d->num_pages << PAGE_SHIFT;
- e820_array->type = e820_type;
- prev = e820_array++;
+ e820_table->addr = d->phys_addr;
+ e820_table->size = d->num_pages << PAGE_SHIFT;
+ e820_table->type = e820_type;
+ prev = e820_table++;
nr_entries++;
}
- if (nr_entries > ARRAY_SIZE(params->e820_array)) {
- u32 nr_e820ext = nr_entries - ARRAY_SIZE(params->e820_array);
+ if (nr_entries > ARRAY_SIZE(params->e820_table)) {
+ u32 nr_e820ext = nr_entries - ARRAY_SIZE(params->e820_table);
add_e820ext(params, e820ext, nr_e820ext);
nr_entries -= nr_e820ext;
@@ -1055,9 +1055,9 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
if (first) {
nr_desc = *map->buff_size / *map->desc_size;
- if (nr_desc > ARRAY_SIZE(p->boot_params->e820_array)) {
+ if (nr_desc > ARRAY_SIZE(p->boot_params->e820_table)) {
u32 nr_e820ext = nr_desc -
- ARRAY_SIZE(p->boot_params->e820_array);
+ ARRAY_SIZE(p->boot_params->e820_table);
status = alloc_e820ext(nr_e820ext, &p->e820ext,
&p->e820ext_size);
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index f0e37236cfe1..a47f832664f2 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -518,7 +518,7 @@ static unsigned long find_random_phys_addr(unsigned long minimum,
/* Verify potential e820 positions, appending to slots list. */
for (i = 0; i < boot_params->e820_entries; i++) {
- process_e820_entry(&boot_params->e820_array[i], minimum,
+ process_e820_entry(&boot_params->e820_table[i], minimum,
image_size);
if (slot_area_index == MAX_SLOT_AREA) {
debug_putstr("Aborted e820 scan (slot_areas full)!\n");
diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
index f2132b3e6880..db62445b75aa 100644
--- a/arch/x86/boot/memory.c
+++ b/arch/x86/boot/memory.c
@@ -21,7 +21,7 @@ static int detect_memory_e820(void)
{
int count = 0;
struct biosregs ireg, oreg;
- struct e820_entry *desc = boot_params.e820_array;
+ struct e820_entry *desc = boot_params.e820_table;
static struct e820_entry buf; /* static so it is zeroed */
initregs(&ireg);
@@ -66,7 +66,7 @@ static int detect_memory_e820(void)
*desc++ = buf;
count++;
- } while (ireg.ebx && count < ARRAY_SIZE(boot_params.e820_array));
+ } while (ireg.ebx && count < ARRAY_SIZE(boot_params.e820_table));
return boot_params.e820_entries = count;
}
diff --git a/arch/x86/include/asm/e820/api.h b/arch/x86/include/asm/e820/api.h
index 0156532dd65a..91a5c346f765 100644
--- a/arch/x86/include/asm/e820/api.h
+++ b/arch/x86/include/asm/e820/api.h
@@ -4,8 +4,8 @@
#include <asm/e820/types.h>
/* see comment in arch/x86/kernel/e820.c */
-extern struct e820_array *e820_array;
-extern struct e820_array *e820_array_saved;
+extern struct e820_table *e820_table;
+extern struct e820_table *e820_table_saved;
extern unsigned long pci_mem_start;
@@ -13,7 +13,7 @@ extern int e820_any_mapped(u64 start, u64 end, unsigned type);
extern int e820_all_mapped(u64 start, u64 end, unsigned type);
extern void e820_add_region(u64 start, u64 size, int type);
extern void e820_print_map(char *who);
-extern int sanitize_e820_array(struct e820_entry *biosmap, int max_nr_map, u32 *pnr_map);
+extern int sanitize_e820_table(struct e820_entry *biosmap, int max_nr_map, u32 *pnr_map);
extern u64 e820_update_range(u64 start, u64 size, unsigned old_type, unsigned new_type);
extern u64 e820_remove_range(u64 start, u64 size, unsigned old_type, int checktype);
extern void update_e820(void);
diff --git a/arch/x86/include/asm/e820/types.h b/arch/x86/include/asm/e820/types.h
index e3171867e126..ae2c4e9db0d4 100644
--- a/arch/x86/include/asm/e820/types.h
+++ b/arch/x86/include/asm/e820/types.h
@@ -21,7 +21,7 @@
*
* This allows for bootstrap/firmware quirks such as possible duplicate
* E820 entries that might need room in the same arrays, prior to the
- * call to sanitize_e820_array() to remove duplicates. The allowance
+ * call to sanitize_e820_table() to remove duplicates. The allowance
* of three memory map entries per node is "enough" entries for
* the initial hardware platform motivating this mechanism to make
* use of additional EFI map entries. Future platforms may want
@@ -68,7 +68,7 @@
/*
* The whole array of E820 entries:
*/
-struct e820_array {
+struct e820_table {
__u32 nr_map;
struct e820_entry map[E820_X_MAX];
};
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index 702f1c6d804e..5f41518f7159 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -152,7 +152,7 @@ struct boot_params {
struct setup_header hdr; /* setup header */ /* 0x1f1 */
__u8 _pad7[0x290-0x1f1-sizeof(struct setup_header)];
__u32 edd_mbr_sig_buffer[EDD_MBR_SIG_MAX]; /* 0x290 */
- struct e820_entry e820_array[E820MAX]; /* 0x2d0 */
+ struct e820_entry e820_table[E820MAX]; /* 0x2d0 */
__u8 _pad8[48]; /* 0xcd0 */
struct edd_info eddbuf[EDDMAXNR]; /* 0xd00 */
__u8 _pad9[276]; /* 0xeec */
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 036d9140384d..11f7eb1e2506 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -512,7 +512,7 @@ static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
if (nr_e820_entries >= E820MAX)
return 1;
- memcpy(&params->e820_array[nr_e820_entries], entry,
+ memcpy(&params->e820_table[nr_e820_entries], entry,
sizeof(struct e820_entry));
params->e820_entries++;
return 0;
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 49d47d6b1afe..ffaf94357d15 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -2,7 +2,7 @@
* Handle the memory map.
* The functions here do the job until bootmem takes over.
*
- * Getting sanitize_e820_array() in sync with i386 version by applying change:
+ * Getting sanitize_e820_table() in sync with i386 version by applying change:
* - Provisions for empty E820 memory regions (reported by certain BIOSes).
* Alex Achenbach <xela@slit.de>, December 2002.
* Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
@@ -31,7 +31,7 @@
* and that is also registered with modifications in the kernel resource tree
* with the iomem_resource as parent.
*
- * The e820_array_saved is directly saved after the BIOS-provided memory map is
+ * The e820_table_saved is directly saved after the BIOS-provided memory map is
* copied. It doesn't get modified afterwards. It's registered for the
* /sys/firmware/memmap interface.
*
@@ -40,10 +40,10 @@
* user can e.g. boot the original kernel with mem=1G while still booting the
* next kernel with full memory.
*/
-static struct e820_array e820_array_init __initdata;
-static struct e820_array initial_e820_array_saved __initdata;
-struct e820_array *e820_array __refdata = &e820_array_init;
-struct e820_array *e820_array_saved __refdata = &initial_e820_array_saved;
+static struct e820_table e820_table_init __initdata;
+static struct e820_table initial_e820_table_saved __initdata;
+struct e820_table *e820_table __refdata = &e820_table_init;
+struct e820_table *e820_table_saved __refdata = &initial_e820_table_saved;
/* For PCI or other memory-mapped resources */
unsigned long pci_mem_start = 0xaeedbabe;
@@ -60,8 +60,8 @@ e820_any_mapped(u64 start, u64 end, unsigned type)
{
int i;
- for (i = 0; i < e820_array->nr_map; i++) {
- struct e820_entry *ei = &e820_array->map[i];
+ for (i = 0; i < e820_table->nr_map; i++) {
+ struct e820_entry *ei = &e820_table->map[i];
if (type && ei->type != type)
continue;
@@ -83,8 +83,8 @@ int __init e820_all_mapped(u64 start, u64 end, unsigned type)
{
int i;
- for (i = 0; i < e820_array->nr_map; i++) {
- struct e820_entry *ei = &e820_array->map[i];
+ for (i = 0; i < e820_table->nr_map; i++) {
+ struct e820_entry *ei = &e820_table->map[i];
if (type && ei->type != type)
continue;
@@ -110,7 +110,7 @@ int __init e820_all_mapped(u64 start, u64 end, unsigned type)
/*
* Add a memory region to the kernel e820 map.
*/
-static void __init __e820_add_region(struct e820_array *e820x, u64 start, u64 size,
+static void __init __e820_add_region(struct e820_table *e820x, u64 start, u64 size,
int type)
{
int x = e820x->nr_map;
@@ -130,7 +130,7 @@ static void __init __e820_add_region(struct e820_array *e820x, u64 start, u64 si
void __init e820_add_region(u64 start, u64 size, int type)
{
- __e820_add_region(e820_array, start, size, type);
+ __e820_add_region(e820_table, start, size, type);
}
static void __init e820_print_type(u32 type)
@@ -166,12 +166,12 @@ void __init e820_print_map(char *who)
{
int i;
- for (i = 0; i < e820_array->nr_map; i++) {
+ for (i = 0; i < e820_table->nr_map; i++) {
printk(KERN_INFO "%s: [mem %#018Lx-%#018Lx] ", who,
- (unsigned long long) e820_array->map[i].addr,
+ (unsigned long long) e820_table->map[i].addr,
(unsigned long long)
- (e820_array->map[i].addr + e820_array->map[i].size - 1));
- e820_print_type(e820_array->map[i].type);
+ (e820_table->map[i].addr + e820_table->map[i].size - 1));
+ e820_print_type(e820_table->map[i].type);
printk(KERN_CONT "\n");
}
}
@@ -195,7 +195,7 @@ void __init e820_print_map(char *who)
* sanitizing succeeds the *pnr_map will be updated with the new
* number of valid entries (something no more than max_nr_map).
*
- * The return value from sanitize_e820_array() is zero if it
+ * The return value from sanitize_e820_table() is zero if it
* successfully 'sanitized' the map entries passed in, and is -1
* if it did nothing, which can happen if either of (1) it was
* only passed one map entry, or (2) any of the input map entries
@@ -259,7 +259,7 @@ static int __init cpcompare(const void *a, const void *b)
return (ap->addr != ap->pbios->addr) - (bp->addr != bp->pbios->addr);
}
-int __init sanitize_e820_array(struct e820_entry *biosmap, int max_nr_map,
+int __init sanitize_e820_table(struct e820_entry *biosmap, int max_nr_map,
u32 *pnr_map)
{
static struct change_member change_point_list[2*E820_X_MAX] __initdata;
@@ -385,7 +385,7 @@ int __init sanitize_e820_array(struct e820_entry *biosmap, int max_nr_map,
return 0;
}
-static int __init __append_e820_array(struct e820_entry *biosmap, int nr_map)
+static int __init __append_e820_table(struct e820_entry *biosmap, int nr_map)
{
while (nr_map) {
u64 start = biosmap->addr;
@@ -414,16 +414,16 @@ static int __init __append_e820_array(struct e820_entry *biosmap, int nr_map)
* will have given us a memory map that we can use to properly
* set up memory. If we aren't, we'll fake a memory map.
*/
-static int __init append_e820_array(struct e820_entry *biosmap, int nr_map)
+static int __init append_e820_table(struct e820_entry *biosmap, int nr_map)
{
/* Only one memory region (or negative)? Ignore it */
if (nr_map < 2)
return -1;
- return __append_e820_array(biosmap, nr_map);
+ return __append_e820_table(biosmap, nr_map);
}
-static u64 __init __e820_update_range(struct e820_array *e820x, u64 start,
+static u64 __init __e820_update_range(struct e820_table *e820x, u64 start,
u64 size, unsigned old_type,
unsigned new_type)
{
@@ -495,13 +495,13 @@ static u64 __init __e820_update_range(struct e820_array *e820x, u64 start,
u64 __init e820_update_range(u64 start, u64 size, unsigned old_type,
unsigned new_type)
{
- return __e820_update_range(e820_array, start, size, old_type, new_type);
+ return __e820_update_range(e820_table, start, size, old_type, new_type);
}
static u64 __init e820_update_range_saved(u64 start, u64 size,
unsigned old_type, unsigned new_type)
{
- return __e820_update_range(e820_array_saved, start, size, old_type,
+ return __e820_update_range(e820_table_saved, start, size, old_type,
new_type);
}
@@ -523,8 +523,8 @@ u64 __init e820_remove_range(u64 start, u64 size, unsigned old_type,
e820_print_type(old_type);
printk(KERN_CONT "\n");
- for (i = 0; i < e820_array->nr_map; i++) {
- struct e820_entry *ei = &e820_array->map[i];
+ for (i = 0; i < e820_table->nr_map; i++) {
+ struct e820_entry *ei = &e820_table->map[i];
u64 final_start, final_end;
u64 ei_end;
@@ -568,15 +568,15 @@ u64 __init e820_remove_range(u64 start, u64 size, unsigned old_type,
void __init update_e820(void)
{
- if (sanitize_e820_array(e820_array->map, ARRAY_SIZE(e820_array->map), &e820_array->nr_map))
+ if (sanitize_e820_table(e820_table->map, ARRAY_SIZE(e820_table->map), &e820_table->nr_map))
return;
printk(KERN_INFO "e820: modified physical RAM map:\n");
e820_print_map("modified");
}
-static void __init update_e820_array_saved(void)
+static void __init update_e820_table_saved(void)
{
- sanitize_e820_array(e820_array_saved->map, ARRAY_SIZE(e820_array_saved->map),
- &e820_array_saved->nr_map);
+ sanitize_e820_table(e820_table_saved->map, ARRAY_SIZE(e820_table_saved->map),
+ &e820_table_saved->nr_map);
}
#define MAX_GAP_END 0x100000000ull
/*
@@ -586,12 +586,12 @@ static int __init e820_search_gap(unsigned long *gapstart,
unsigned long *gapsize)
{
unsigned long long last = MAX_GAP_END;
- int i = e820_array->nr_map;
+ int i = e820_table->nr_map;
int found = 0;
while (--i >= 0) {
- unsigned long long start = e820_array->map[i].addr;
- unsigned long long end = start + e820_array->map[i].size;
+ unsigned long long start = e820_table->map[i].addr;
+ unsigned long long end = start + e820_table->map[i].size;
/*
* Since "last" is at most 4GB, we know we'll
@@ -650,7 +650,7 @@ __init void e820_setup_gap(void)
/*
* Called late during init, in free_initmem().
*
- * Initial e820 and e820_array_saved are largish __initdata arrays.
+ * Initial e820 and e820_table_saved are largish __initdata arrays.
* Copy them to (usually much smaller) dynamically allocated area.
* This is done after all tweaks we ever do to them:
* all functions which modify them are __init functions,
@@ -658,26 +658,26 @@ __init void e820_setup_gap(void)
*/
__init void e820_reallocate_tables(void)
{
- struct e820_array *n;
+ struct e820_table *n;
int size;
- size = offsetof(struct e820_array, map) + sizeof(struct e820_entry) * e820_array->nr_map;
+ size = offsetof(struct e820_table, map) + sizeof(struct e820_entry) * e820_table->nr_map;
n = kmalloc(size, GFP_KERNEL);
BUG_ON(!n);
- memcpy(n, e820_array, size);
- e820_array = n;
+ memcpy(n, e820_table, size);
+ e820_table = n;
- size = offsetof(struct e820_array, map) + sizeof(struct e820_entry) * e820_array_saved->nr_map;
+ size = offsetof(struct e820_table, map) + sizeof(struct e820_entry) * e820_table_saved->nr_map;
n = kmalloc(size, GFP_KERNEL);
BUG_ON(!n);
- memcpy(n, e820_array_saved, size);
- e820_array_saved = n;
+ memcpy(n, e820_table_saved, size);
+ e820_table_saved = n;
}
/**
* Because of the size limitation of struct boot_params, only first
* 128 E820 memory entries are passed to kernel via
- * boot_params.e820_array, others are passed via SETUP_E820_EXT node of
+ * boot_params.e820_table, others are passed via SETUP_E820_EXT node of
* linked list of struct setup_data, which is parsed here.
*/
void __init parse_e820_ext(u64 phys_addr, u32 data_len)
@@ -689,8 +689,8 @@ void __init parse_e820_ext(u64 phys_addr, u32 data_len)
sdata = early_memremap(phys_addr, data_len);
entries = sdata->len / sizeof(struct e820_entry);
extmap = (struct e820_entry *)(sdata->data);
- __append_e820_array(extmap, entries);
- sanitize_e820_array(e820_array->map, ARRAY_SIZE(e820_array->map), &e820_array->nr_map);
+ __append_e820_table(extmap, entries);
+ sanitize_e820_table(e820_table->map, ARRAY_SIZE(e820_table->map), &e820_table->nr_map);
early_memunmap(sdata, data_len);
printk(KERN_INFO "e820: extended physical RAM map:\n");
e820_print_map("extended");
@@ -709,8 +709,8 @@ void __init e820_mark_nosave_regions(unsigned long limit_pfn)
int i;
unsigned long pfn = 0;
- for (i = 0; i < e820_array->nr_map; i++) {
- struct e820_entry *ei = &e820_array->map[i];
+ for (i = 0; i < e820_table->nr_map; i++) {
+ struct e820_entry *ei = &e820_table->map[i];
if (pfn < PFN_UP(ei->addr))
register_nosave_region(pfn, PFN_UP(ei->addr));
@@ -734,8 +734,8 @@ static int __init e820_mark_nvs_memory(void)
{
int i;
- for (i = 0; i < e820_array->nr_map; i++) {
- struct e820_entry *ei = &e820_array->map[i];
+ for (i = 0; i < e820_table->nr_map; i++) {
+ struct e820_entry *ei = &e820_table->map[i];
if (ei->type == E820_NVS)
acpi_nvs_register(ei->addr, ei->size);
@@ -747,7 +747,7 @@ core_initcall(e820_mark_nvs_memory);
#endif
/*
- * pre allocated 4k and reserved it in memblock and e820_array_saved
+ * pre allocated 4k and reserved it in memblock and e820_table_saved
*/
u64 __init early_reserve_e820(u64 size, u64 align)
{
@@ -756,8 +756,8 @@ u64 __init early_reserve_e820(u64 size, u64 align)
addr = __memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
if (addr) {
e820_update_range_saved(addr, size, E820_RAM, E820_RESERVED);
- printk(KERN_INFO "e820: update e820_array_saved for early_reserve_e820\n");
- update_e820_array_saved();
+ printk(KERN_INFO "e820: update e820_table_saved for early_reserve_e820\n");
+ update_e820_table_saved();
}
return addr;
@@ -782,8 +782,8 @@ static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
unsigned long last_pfn = 0;
unsigned long max_arch_pfn = MAX_ARCH_PFN;
- for (i = 0; i < e820_array->nr_map; i++) {
- struct e820_entry *ei = &e820_array->map[i];
+ for (i = 0; i < e820_table->nr_map; i++) {
+ struct e820_entry *ei = &e820_table->map[i];
unsigned long start_pfn;
unsigned long end_pfn;
@@ -874,7 +874,7 @@ static int __init parse_memmap_one(char *p)
*/
saved_max_pfn = e820_end_of_ram_pfn();
#endif
- e820_array->nr_map = 0;
+ e820_table->nr_map = 0;
userdef = 1;
return 0;
}
@@ -921,8 +921,8 @@ early_param("memmap", parse_memmap_opt);
void __init finish_e820_parsing(void)
{
if (userdef) {
- if (sanitize_e820_array(e820_array->map, ARRAY_SIZE(e820_array->map),
- &e820_array->nr_map) < 0)
+ if (sanitize_e820_table(e820_table->map, ARRAY_SIZE(e820_table->map),
+ &e820_table->nr_map) < 0)
early_panic("Invalid user supplied memory map");
printk(KERN_INFO "e820: user-defined physical RAM map:\n");
@@ -1009,35 +1009,35 @@ void __init e820_reserve_resources(void)
struct resource *res;
u64 end;
- res = alloc_bootmem(sizeof(struct resource) * e820_array->nr_map);
+ res = alloc_bootmem(sizeof(struct resource) * e820_table->nr_map);
e820_res = res;
- for (i = 0; i < e820_array->nr_map; i++) {
- end = e820_array->map[i].addr + e820_array->map[i].size - 1;
+ for (i = 0; i < e820_table->nr_map; i++) {
+ end = e820_table->map[i].addr + e820_table->map[i].size - 1;
if (end != (resource_size_t)end) {
res++;
continue;
}
- res->name = e820_type_to_string(e820_array->map[i].type);
- res->start = e820_array->map[i].addr;
+ res->name = e820_type_to_string(e820_table->map[i].type);
+ res->start = e820_table->map[i].addr;
res->end = end;
- res->flags = e820_type_to_iomem_type(e820_array->map[i].type);
- res->desc = e820_type_to_iores_desc(e820_array->map[i].type);
+ res->flags = e820_type_to_iomem_type(e820_table->map[i].type);
+ res->desc = e820_type_to_iores_desc(e820_table->map[i].type);
/*
* don't register the region that could be conflicted with
* pci device BAR resource and insert them later in
* pcibios_resource_survey()
*/
- if (do_mark_busy(e820_array->map[i].type, res)) {
+ if (do_mark_busy(e820_table->map[i].type, res)) {
res->flags |= IORESOURCE_BUSY;
insert_resource(&iomem_resource, res);
}
res++;
}
- for (i = 0; i < e820_array_saved->nr_map; i++) {
- struct e820_entry *entry = &e820_array_saved->map[i];
+ for (i = 0; i < e820_table_saved->nr_map; i++) {
+ struct e820_entry *entry = &e820_table_saved->map[i];
firmware_map_add_early(entry->addr,
entry->addr + entry->size,
e820_type_to_string(entry->type));
@@ -1069,7 +1069,7 @@ void __init e820_reserve_resources_late(void)
struct resource *res;
res = e820_res;
- for (i = 0; i < e820_array->nr_map; i++) {
+ for (i = 0; i < e820_table->nr_map; i++) {
if (!res->parent && res->end)
insert_resource_expand_to_fit(&iomem_resource, res);
res++;
@@ -1079,8 +1079,8 @@ void __init e820_reserve_resources_late(void)
* Try to bump up RAM regions to reasonable boundaries to
* avoid stolen RAM:
*/
- for (i = 0; i < e820_array->nr_map; i++) {
- struct e820_entry *entry = &e820_array->map[i];
+ for (i = 0; i < e820_table->nr_map; i++) {
+ struct e820_entry *entry = &e820_table->map[i];
u64 start, end;
if (entry->type != E820_RAM)
@@ -1110,11 +1110,11 @@ char *__init default_machine_specific_memory_setup(void)
* the next section from 1mb->appropriate_mem_k
*/
new_nr = boot_params.e820_entries;
- sanitize_e820_array(boot_params.e820_array,
- ARRAY_SIZE(boot_params.e820_array),
+ sanitize_e820_table(boot_params.e820_table,
+ ARRAY_SIZE(boot_params.e820_table),
&new_nr);
boot_params.e820_entries = new_nr;
- if (append_e820_array(boot_params.e820_array, boot_params.e820_entries)
+ if (append_e820_table(boot_params.e820_table, boot_params.e820_entries)
< 0) {
u64 mem_size;
@@ -1128,7 +1128,7 @@ char *__init default_machine_specific_memory_setup(void)
who = "BIOS-e801";
}
- e820_array->nr_map = 0;
+ e820_table->nr_map = 0;
e820_add_region(0, LOWMEMSIZE(), E820_RAM);
e820_add_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
}
@@ -1142,7 +1142,7 @@ void __init setup_memory_map(void)
char *who;
who = x86_init.resources.memory_setup();
- memcpy(e820_array_saved, e820_array, sizeof(struct e820_array));
+ memcpy(e820_table_saved, e820_table, sizeof(struct e820_table));
printk(KERN_INFO "e820: BIOS-provided physical RAM map:\n");
e820_print_map(who);
}
@@ -1159,8 +1159,8 @@ void __init memblock_x86_fill(void)
*/
memblock_allow_resize();
- for (i = 0; i < e820_array->nr_map; i++) {
- struct e820_entry *ei = &e820_array->map[i];
+ for (i = 0; i < e820_table->nr_map; i++) {
+ struct e820_entry *ei = &e820_table->map[i];
end = ei->addr + ei->size;
if (end != (resource_size_t)end)
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 31d755534c36..a6cdf9b9c7a9 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -547,7 +547,7 @@ intel_graphics_stolen(int num, int slot, int func,
/* Mark this space as reserved */
e820_add_region(base, size, E820_RESERVED);
- sanitize_e820_array(e820_array->map, ARRAY_SIZE(e820_array->map), &e820_array->nr_map);
+ sanitize_e820_table(e820_table->map, ARRAY_SIZE(e820_table->map), &e820_table->nr_map);
}
static void __init intel_graphics_quirks(int num, int slot, int func)
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 6bedcb9bad1e..39d340ac50eb 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -100,14 +100,14 @@ static int setup_e820_entries(struct boot_params *params)
{
unsigned int nr_e820_entries;
- nr_e820_entries = e820_array_saved->nr_map;
+ nr_e820_entries = e820_table_saved->nr_map;
/* TODO: Pass entries more than E820MAX in bootparams setup data */
if (nr_e820_entries > E820MAX)
nr_e820_entries = E820MAX;
params->e820_entries = nr_e820_entries;
- memcpy(&params->e820_array, &e820_array_saved->map,
+ memcpy(&params->e820_table, &e820_table_saved->map,
nr_e820_entries * sizeof(struct e820_entry));
return 0;
@@ -233,10 +233,10 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params,
nr_e820_entries = params->e820_entries;
for (i = 0; i < nr_e820_entries; i++) {
- if (params->e820_array[i].type != E820_RAM)
+ if (params->e820_table[i].type != E820_RAM)
continue;
- start = params->e820_array[i].addr;
- end = params->e820_array[i].addr + params->e820_array[i].size - 1;
+ start = params->e820_table[i].addr;
+ end = params->e820_table[i].addr + params->e820_table[i].size - 1;
if ((start <= 0x100000) && end > 0x100000) {
mem_k = (end >> 10) - (0x100000 >> 10);
diff --git a/arch/x86/kernel/resource.c b/arch/x86/kernel/resource.c
index aeec503e0a6d..17b494d4bb86 100644
--- a/arch/x86/kernel/resource.c
+++ b/arch/x86/kernel/resource.c
@@ -27,8 +27,8 @@ static void remove_e820_regions(struct resource *avail)
int i;
struct e820_entry *entry;
- for (i = 0; i < e820_array->nr_map; i++) {
- entry = &e820_array->map[i];
+ for (i = 0; i < e820_table->nr_map; i++) {
+ entry = &e820_table->map[i];
resource_clip(avail, entry->addr,
entry->addr + entry->size - 1);
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index a91db2edee9e..a3c1d39116b7 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -458,8 +458,8 @@ static void __init e820_reserve_setup_data(void)
early_memunmap(data, sizeof(*data));
}
- sanitize_e820_array(e820_array->map, ARRAY_SIZE(e820_array->map), &e820_array->nr_map);
- memcpy(e820_array_saved, e820_array, sizeof(struct e820_array));
+ sanitize_e820_table(e820_table->map, ARRAY_SIZE(e820_table->map), &e820_table->nr_map);
+ memcpy(e820_table_saved, e820_table, sizeof(struct e820_table));
printk(KERN_INFO "extended physical RAM map:\n");
e820_print_map("reserve setup_data");
}
@@ -763,7 +763,7 @@ static void __init trim_bios_range(void)
*/
e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
- sanitize_e820_array(e820_array->map, ARRAY_SIZE(e820_array->map), &e820_array->nr_map);
+ sanitize_e820_table(e820_table->map, ARRAY_SIZE(e820_table->map), &e820_table->nr_map);
}
/* called before trim_bios_range() to spare extra sanitize */
@@ -1026,7 +1026,7 @@ void __init setup_arch(char **cmdline_p)
early_dump_pci_devices();
#endif
- /* update the e820_array_saved too */
+ /* update the e820_table_saved too */
e820_reserve_setup_data();
finish_e820_parsing();
@@ -1056,7 +1056,7 @@ void __init setup_arch(char **cmdline_p)
if (ppro_with_ram_bug()) {
e820_update_range(0x70000000ULL, 0x40000ULL, E820_RAM,
E820_RESERVED);
- sanitize_e820_array(e820_array->map, ARRAY_SIZE(e820_array->map), &e820_array->nr_map);
+ sanitize_e820_table(e820_table->map, ARRAY_SIZE(e820_table->map), &e820_table->nr_map);
printk(KERN_INFO "fixed physical RAM map:\n");
e820_print_map("bad_ppro");
}
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 705f1207181a..825822688aa5 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -188,12 +188,12 @@ static int tboot_setup_sleep(void)
tboot->num_mac_regions = 0;
- for (i = 0; i < e820_array->nr_map; i++) {
- if ((e820_array->map[i].type != E820_RAM)
- && (e820_array->map[i].type != E820_RESERVED_KERN))
+ for (i = 0; i < e820_table->nr_map; i++) {
+ if ((e820_table->map[i].type != E820_RAM)
+ && (e820_table->map[i].type != E820_RESERVED_KERN))
continue;
- add_mac_region(e820_array->map[i].addr, e820_array->map[i].size);
+ add_mac_region(e820_table->map[i].addr, e820_table->map[i].size);
}
tboot->acpi_sinfo.kernel_s3_resume_vector =
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 09226a33e418..fc42e9604562 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1178,9 +1178,9 @@ static __init char *lguest_memory_setup(void)
* The Linux bootloader header contains an "e820" memory map: the
* Launcher populated the first entry with our memory limit.
*/
- e820_add_region(boot_params.e820_array[0].addr,
- boot_params.e820_array[0].size,
- boot_params.e820_array[0].type);
+ e820_add_region(boot_params.e820_table[0].addr,
+ boot_params.e820_table[0].size,
+ boot_params.e820_table[0].type);
/* This string is for the boot messages. */
return "LGUEST";
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 2543a11c9df8..9e055f6859e5 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -167,7 +167,7 @@ static void __init do_add_efi_memmap(void)
}
e820_add_region(start, size, e820_type);
}
- sanitize_e820_array(e820_array->map, ARRAY_SIZE(e820_array->map), &e820_array->nr_map);
+ sanitize_e820_table(e820_table->map, ARRAY_SIZE(e820_table->map), &e820_table->nr_map);
}
int __init efi_memblock_x86_reserve_range(void)
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index c1a245e64ed1..8fe5be0e90b2 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -201,7 +201,7 @@ struct restore_data_record {
* @map: the e820 map to be calculated
* @buf: the md5 result to be stored to
*/
-static int get_e820_md5(struct e820_array *map, void *buf)
+static int get_e820_md5(struct e820_table *map, void *buf)
{
struct scatterlist sg;
struct crypto_ahash *tfm;
@@ -214,7 +214,7 @@ static int get_e820_md5(struct e820_array *map, void *buf)
{
AHASH_REQUEST_ON_STACK(req, tfm);
- size = offsetof(struct e820_array, map)
+ size = offsetof(struct e820_table, map)
+ sizeof(struct e820_entry) * map->nr_map;
ahash_request_set_tfm(req, tfm);
sg_init_one(&sg, (u8 *)map, size);
@@ -232,7 +232,7 @@ static int get_e820_md5(struct e820_array *map, void *buf)
static void hibernation_e820_save(void *buf)
{
- get_e820_md5(e820_array_saved, buf);
+ get_e820_md5(e820_table_saved, buf);
}
static bool hibernation_e820_mismatch(void *buf)
@@ -245,7 +245,7 @@ static bool hibernation_e820_mismatch(void *buf)
if (!memcmp(result, buf, MD5_DIGEST_SIZE))
return false;
- ret = get_e820_md5(e820_array_saved, result);
+ ret = get_e820_md5(e820_table_saved, result);
if (ret)
return true;
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 3cc90e902064..f98713e5a705 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -41,8 +41,8 @@ struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
unsigned long xen_released_pages;
/* E820 map used during setting up memory. */
-static struct e820_entry xen_e820_array[E820_X_MAX] __initdata;
-static u32 xen_e820_array_entries __initdata;
+static struct e820_entry xen_e820_table[E820_X_MAX] __initdata;
+static u32 xen_e820_table_entries __initdata;
/*
* Buffer used to remap identity mapped pages. We only need the virtual space.
@@ -198,11 +198,11 @@ void __init xen_inv_extra_mem(void)
*/
static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
{
- const struct e820_entry *entry = xen_e820_array;
+ const struct e820_entry *entry = xen_e820_table;
unsigned int i;
unsigned long done = 0;
- for (i = 0; i < xen_e820_array_entries; i++, entry++) {
+ for (i = 0; i < xen_e820_table_entries; i++, entry++) {
unsigned long s_pfn;
unsigned long e_pfn;
@@ -457,7 +457,7 @@ static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
{
phys_addr_t start = 0;
unsigned long ret_val = 0;
- const struct e820_entry *entry = xen_e820_array;
+ const struct e820_entry *entry = xen_e820_table;
int i;
/*
@@ -471,9 +471,9 @@ static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
* example) the DMI tables in a reserved region that begins on
* a non-page boundary.
*/
- for (i = 0; i < xen_e820_array_entries; i++, entry++) {
+ for (i = 0; i < xen_e820_table_entries; i++, entry++) {
phys_addr_t end = entry->addr + entry->size;
- if (entry->type == E820_RAM || i == xen_e820_array_entries - 1) {
+ if (entry->type == E820_RAM || i == xen_e820_table_entries - 1) {
unsigned long start_pfn = PFN_DOWN(start);
unsigned long end_pfn = PFN_UP(end);
@@ -601,10 +601,10 @@ static void __init xen_align_and_add_e820_region(phys_addr_t start,
static void __init xen_ignore_unusable(void)
{
- struct e820_entry *entry = xen_e820_array;
+ struct e820_entry *entry = xen_e820_table;
unsigned int i;
- for (i = 0; i < xen_e820_array_entries; i++, entry++) {
+ for (i = 0; i < xen_e820_table_entries; i++, entry++) {
if (entry->type == E820_UNUSABLE)
entry->type = E820_RAM;
}
@@ -620,9 +620,9 @@ bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
return false;
end = start + size;
- entry = xen_e820_array;
+ entry = xen_e820_table;
- for (mapcnt = 0; mapcnt < xen_e820_array_entries; mapcnt++) {
+ for (mapcnt = 0; mapcnt < xen_e820_table_entries; mapcnt++) {
if (entry->type == E820_RAM && entry->addr <= start &&
(entry->addr + entry->size) >= end)
return false;
@@ -645,9 +645,9 @@ phys_addr_t __init xen_find_free_area(phys_addr_t size)
{
unsigned mapcnt;
phys_addr_t addr, start;
- struct e820_entry *entry = xen_e820_array;
+ struct e820_entry *entry = xen_e820_table;
- for (mapcnt = 0; mapcnt < xen_e820_array_entries; mapcnt++, entry++) {
+ for (mapcnt = 0; mapcnt < xen_e820_table_entries; mapcnt++, entry++) {
if (entry->type != E820_RAM || entry->size < size)
continue;
start = entry->addr;
@@ -750,8 +750,8 @@ char * __init xen_memory_setup(void)
max_pfn = min(max_pfn, xen_start_info->nr_pages);
mem_end = PFN_PHYS(max_pfn);
- memmap.nr_entries = ARRAY_SIZE(xen_e820_array);
- set_xen_guest_handle(memmap.buffer, xen_e820_array);
+ memmap.nr_entries = ARRAY_SIZE(xen_e820_table);
+ set_xen_guest_handle(memmap.buffer, xen_e820_table);
op = xen_initial_domain() ?
XENMEM_machine_memory_map :
@@ -760,16 +760,16 @@ char * __init xen_memory_setup(void)
if (rc == -ENOSYS) {
BUG_ON(xen_initial_domain());
memmap.nr_entries = 1;
- xen_e820_array[0].addr = 0ULL;
- xen_e820_array[0].size = mem_end;
+ xen_e820_table[0].addr = 0ULL;
+ xen_e820_table[0].size = mem_end;
/* 8MB slack (to balance backend allocations). */
- xen_e820_array[0].size += 8ULL << 20;
- xen_e820_array[0].type = E820_RAM;
+ xen_e820_table[0].size += 8ULL << 20;
+ xen_e820_table[0].type = E820_RAM;
rc = 0;
}
BUG_ON(rc);
BUG_ON(memmap.nr_entries == 0);
- xen_e820_array_entries = memmap.nr_entries;
+ xen_e820_table_entries = memmap.nr_entries;
/*
* Xen won't allow a 1:1 mapping to be created to UNUSABLE
@@ -783,8 +783,8 @@ char * __init xen_memory_setup(void)
xen_ignore_unusable();
/* Make sure the Xen-supplied memory map is well-ordered. */
- sanitize_e820_array(xen_e820_array, ARRAY_SIZE(xen_e820_array),
- &xen_e820_array_entries);
+ sanitize_e820_table(xen_e820_table, ARRAY_SIZE(xen_e820_table),
+ &xen_e820_table_entries);
max_pages = xen_get_max_pages();
@@ -811,13 +811,13 @@ char * __init xen_memory_setup(void)
extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
extra_pages, max_pages - max_pfn);
i = 0;
- addr = xen_e820_array[0].addr;
- size = xen_e820_array[0].size;
- while (i < xen_e820_array_entries) {
+ addr = xen_e820_table[0].addr;
+ size = xen_e820_table[0].size;
+ while (i < xen_e820_table_entries) {
bool discard = false;
chunk_size = size;
- type = xen_e820_array[i].type;
+ type = xen_e820_table[i].type;
if (type == E820_RAM) {
if (addr < mem_end) {
@@ -840,9 +840,9 @@ char * __init xen_memory_setup(void)
size -= chunk_size;
if (size == 0) {
i++;
- if (i < xen_e820_array_entries) {
- addr = xen_e820_array[i].addr;
- size = xen_e820_array[i].size;
+ if (i < xen_e820_table_entries) {
+ addr = xen_e820_table[i].addr;
+ size = xen_e820_table[i].size;
}
}
}
@@ -861,7 +861,7 @@ char * __init xen_memory_setup(void)
e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
E820_RESERVED);
- sanitize_e820_array(e820_array->map, ARRAY_SIZE(e820_array->map), &e820_array->nr_map);
+ sanitize_e820_table(e820_table->map, ARRAY_SIZE(e820_table->map), &e820_table->nr_map);
/*
* Check whether the kernel itself conflicts with the target E820 map.
@@ -923,21 +923,21 @@ char * __init xen_auto_xlated_memory_setup(void)
int i;
int rc;
- memmap.nr_entries = ARRAY_SIZE(xen_e820_array);
- set_xen_guest_handle(memmap.buffer, xen_e820_array);
+ memmap.nr_entries = ARRAY_SIZE(xen_e820_table);
+ set_xen_guest_handle(memmap.buffer, xen_e820_table);
rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
if (rc < 0)
panic("No memory map (%d)\n", rc);
- xen_e820_array_entries = memmap.nr_entries;
+ xen_e820_table_entries = memmap.nr_entries;
- sanitize_e820_array(xen_e820_array, ARRAY_SIZE(xen_e820_array),
- &xen_e820_array_entries);
+ sanitize_e820_table(xen_e820_table, ARRAY_SIZE(xen_e820_table),
+ &xen_e820_table_entries);
- for (i = 0; i < xen_e820_array_entries; i++)
- e820_add_region(xen_e820_array[i].addr, xen_e820_array[i].size,
- xen_e820_array[i].type);
+ for (i = 0; i < xen_e820_table_entries; i++)
+ e820_add_region(xen_e820_table[i].addr, xen_e820_table[i].size,
+ xen_e820_table[i].type);
/* Remove p2m info, it is not needed. */
xen_start_info->mfn_list = 0;