 drivers/firmware/efi/unaccepted_memory.c | 106 ++++++++++++++++++++++++------
 1 file changed, 94 insertions(+), 12 deletions(-)
diff --git a/drivers/firmware/efi/unaccepted_memory.c b/drivers/firmware/efi/unaccepted_memory.c
index 853f7dc3c21d..c2c067eff634 100644
--- a/drivers/firmware/efi/unaccepted_memory.c
+++ b/drivers/firmware/efi/unaccepted_memory.c
@@ -3,11 +3,21 @@
#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
+#include <linux/crash_dump.h>
+#include <linux/nmi.h>
#include <asm/unaccepted_memory.h>
-/* Protects unaccepted memory bitmap */
+/* Protects unaccepted memory bitmap and accepting_list */
static DEFINE_SPINLOCK(unaccepted_memory_lock);
+struct accept_range {
+ struct list_head list;
+ unsigned long start;
+ unsigned long end;
+};
+
+static LIST_HEAD(accepting_list);
+
/*
* accept_memory() -- Consult bitmap and accept the memory if needed.
*
@@ -20,10 +30,12 @@ static DEFINE_SPINLOCK(unaccepted_memory_lock);
* - memory that is below phys_base;
* - memory that is above the memory that is addressable by the bitmap;
*/
-void accept_memory(phys_addr_t start, phys_addr_t end)
+void accept_memory(phys_addr_t start, unsigned long size)
{
struct efi_unaccepted_memory *unaccepted;
unsigned long range_start, range_end;
+ struct accept_range range, *entry;
+ phys_addr_t end = start + size;
unsigned long flags;
u64 unit_size;
@@ -63,13 +75,13 @@ void accept_memory(phys_addr_t start, phys_addr_t end)
* "guard" page is accepted in addition to the memory that needs to be
* used:
*
- * 1. Implicitly extend the range_contains_unaccepted_memory(start, end)
- * checks up to end+unit_size if 'end' is aligned on a unit_size
- * boundary.
+ * 1. Implicitly extend the range_contains_unaccepted_memory(start, size)
+ * checks up to the next unit_size if 'start+size' is aligned on a
+ * unit_size boundary.
*
- * 2. Implicitly extend accept_memory(start, end) to end+unit_size if
- * 'end' is aligned on a unit_size boundary. (immediately following
- * this comment)
+ * 2. Implicitly extend accept_memory(start, size) to the next unit_size
+ * if 'start+size' is aligned on a unit_size boundary. (immediately
+ * following this comment)
*/
if (!(end % unit_size))
end += unit_size;
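
To make the guard-unit handling above concrete, here is a small standalone sketch of the index math (illustrative only: DIV_ROUND_UP is reimplemented locally, and the 2MB unit_size with a phys_base of 0 are assumed values, not taken from the patch):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long long unit_size = 2ULL << 20;	/* assume 2MB units */
	unsigned long long start = 0x200000, size = 0x200000;
	unsigned long long end = start + size;		/* 0x400000 */

	/* 'end' is unit_size-aligned, so the guard unit is pulled in too. */
	if (!(end % unit_size))
		end += unit_size;			/* now 0x600000 */

	printf("bitmap bits [%llu, %llu)\n",
	       start / unit_size, DIV_ROUND_UP(end, unit_size));
	/* Prints "bitmap bits [1, 3)": unit 1 plus unit 2, the guard. */
	return 0;
}

Accepting one extra unit past an aligned end means a reader that strays just beyond the requested range never lands in unaccepted memory.
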
@@ -78,26 +90,77 @@ void accept_memory(phys_addr_t start, phys_addr_t end)
if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
end = unaccepted->size * unit_size * BITS_PER_BYTE;
- range_start = start / unit_size;
-
+ range.start = start / unit_size;
+ range.end = DIV_ROUND_UP(end, unit_size);
+retry:
spin_lock_irqsave(&unaccepted_memory_lock, flags);
+
+ /*
+ * Check if anybody is working on accepting the same memory range.
+ *
+ * The check is done with unit_size granularity. It is crucial to catch
+ * all accept requests to the same unit_size block, even if they don't
+ * overlap at the physical address level.
+ */
+ list_for_each_entry(entry, &accepting_list, list) {
+ if (entry->end <= range.start)
+ continue;
+ if (entry->start >= range.end)
+ continue;
+
+ /*
+ * Somebody else is accepting the range, or at least part of it.
+ *
+ * Drop the lock and retry until it is complete.
+ */
+ spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
+ goto retry;
+ }
+
+ /*
+ * Register that the range is about to be accepted.
+ * Make sure nobody else will accept it.
+ */
+ list_add(&range.list, &accepting_list);
+
+ range_start = range.start;
for_each_set_bitrange_from(range_start, range_end, unaccepted->bitmap,
- DIV_ROUND_UP(end, unit_size)) {
+ range.end) {
unsigned long phys_start, phys_end;
unsigned long len = range_end - range_start;
phys_start = range_start * unit_size + unaccepted->phys_base;
phys_end = range_end * unit_size + unaccepted->phys_base;
+ /*
+ * Keep interrupts disabled until the accept operation is
+ * complete in order to prevent deadlocks.
+ *
+ * Enabling interrupts before calling arch_accept_memory()
+ * creates an opportunity for an interrupt handler to request
+ * acceptance for the same memory. The handler will continuously
+ * spin with interrupts disabled, preventing the other task from
+ * making progress with the acceptance process.
+ */
+ spin_unlock(&unaccepted_memory_lock);
+
arch_accept_memory(phys_start, phys_end);
+
+ spin_lock(&unaccepted_memory_lock);
bitmap_clear(unaccepted->bitmap, range_start, len);
}
+
+ list_del(&range.list);
+
+ touch_softlockup_watchdog();
+
spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
}
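
The accepting_list dance above is easiest to see in isolation. Below is a minimal userspace model of the same protocol, with a pthread mutex standing in for the spinlock and usleep() standing in for arch_accept_memory(); every name in it (accept_range(), overlaps(), the accepted[] array) is invented for illustration, not kernel code. The shape matches the patch: check for an overlapping claim under the lock and retry if one exists, register a claim, drop the lock around the slow accept, then unlink the claim.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct range { struct range *next; unsigned long start, end; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct range *accepting;	/* models accepting_list */
static bool accepted[16];	/* models the bitmap, with inverted sense */

static bool overlaps(struct range *r, unsigned long s, unsigned long e)
{
	return r->start < e && r->end > s;
}

static void accept_range(unsigned long start, unsigned long end)
{
	struct range self = { .start = start, .end = end };
	struct range *r, **pp;
retry:
	pthread_mutex_lock(&lock);
	for (r = accepting; r; r = r->next) {
		if (overlaps(r, start, end)) {
			/* Somebody else is accepting part of it: retry. */
			pthread_mutex_unlock(&lock);
			goto retry;
		}
	}
	/* Claim the range so nobody else accepts it concurrently. */
	self.next = accepting;
	accepting = &self;

	for (unsigned long i = start; i < end; i++) {
		if (accepted[i])
			continue;
		pthread_mutex_unlock(&lock);	/* drop lock for slow work */
		usleep(1000);		/* stands in for arch_accept_memory() */
		pthread_mutex_lock(&lock);
		accepted[i] = true;
	}

	for (pp = &accepting; *pp; pp = &(*pp)->next) {
		if (*pp == &self) {
			*pp = self.next;	/* unlink our claim */
			break;
		}
	}
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
	unsigned long base = (unsigned long)arg;

	accept_range(base, base + 8);	/* [0,8) and [4,12) overlap */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, (void *)0UL);
	pthread_create(&b, NULL, worker, (void *)4UL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	for (int i = 0; i < 16; i++)
		printf("%d", (int)accepted[i]);
	printf("\n");	/* 1111111111110000: each unit accepted once */
	return 0;
}

One deliberate difference from the patch: the model has no IRQ state to manage, whereas the kernel code keeps interrupts disabled across the inner unlock/lock pair precisely so an interrupt handler cannot spin forever on a range the interrupted task is still accepting.
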
-bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end)
+bool range_contains_unaccepted_memory(phys_addr_t start, unsigned long size)
{
struct efi_unaccepted_memory *unaccepted;
+ phys_addr_t end = start + size;
unsigned long flags;
bool ret = false;
u64 unit_size;
@@ -145,3 +208,22 @@ bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end)
return ret;
}
+
+#ifdef CONFIG_PROC_VMCORE
+static bool unaccepted_memory_vmcore_pfn_is_ram(struct vmcore_cb *cb,
+ unsigned long pfn)
+{
+ return !pfn_is_unaccepted_memory(pfn);
+}
+
+static struct vmcore_cb vmcore_cb = {
+ .pfn_is_ram = unaccepted_memory_vmcore_pfn_is_ram,
+};
+
+static int __init unaccepted_memory_init_kdump(void)
+{
+ register_vmcore_cb(&vmcore_cb);
+ return 0;
+}
+core_initcall(unaccepted_memory_init_kdump);
+#endif /* CONFIG_PROC_VMCORE */
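
For context on the CONFIG_PROC_VMCORE hunk, here is a rough sketch of how a vmcore reader consumes a pfn_is_ram-style callback; the helper names and the toy bitmap below are hypothetical (the real plumbing lives in fs/proc/vmcore.c). Pages the callback rejects are zero-filled instead of read, so dumping never touches unaccepted memory.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE	4096

/* Stand-in for the unaccepted-memory bitmap: bit set => pfn unaccepted. */
static const unsigned char unaccepted_bitmap[] = { 0x02 };	/* pfn 1 */

static bool pfn_is_unaccepted(unsigned long pfn)
{
	return unaccepted_bitmap[pfn / 8] & (1u << (pfn % 8));
}

/* Mirrors the vmcore_cb contract: true means the pfn is safe to read. */
static bool pfn_is_ram(unsigned long pfn)
{
	return !pfn_is_unaccepted(pfn);
}

static void read_dump_page(unsigned long pfn, unsigned char *buf)
{
	if (!pfn_is_ram(pfn)) {
		memset(buf, 0, PAGE_SIZE);	/* zero-fill, do not touch */
		return;
	}
	memset(buf, 0xab, PAGE_SIZE);		/* models copying real RAM */
}

int main(void)
{
	unsigned char page[PAGE_SIZE];

	for (unsigned long pfn = 0; pfn < 3; pfn++) {
		read_dump_page(pfn, page);
		printf("pfn %lu: %s\n", pfn, page[0] ? "copied" : "zeroed");
	}
	return 0;
}

Touching an unaccepted page from the kdump kernel would trigger acceptance as a side effect, which a crash dump must not do; declining such pages through the callback keeps /proc/vmcore reads side-effect free.
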