author    Linus Torvalds <torvalds@linux-foundation.org>  2019-03-12 10:39:53 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-03-12 10:39:53 -0700
commit    a667cb7a94d48a483fb5d6006fe04a440f1a42ce (patch)
tree      ccb9e596db96d53fcc4ba13a3370ec84912d0f22 /arch
parent    cb1d150d809e2409725ba275c5101c4fc4465b8e (diff)
parent    586187d7de71b4da7956ba588ae42253b9ff6482 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:

 - a few misc things

 - the rest of MM

 - remove flex_arrays, replace with new simple radix-tree implementation

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (38 commits)
  Drop flex_arrays
  sctp: convert to genradix
  proc: commit to genradix
  generic radix trees
  selinux: convert to kvmalloc
  md: convert to kvmalloc
  openvswitch: convert to kvmalloc
  of: fix kmemleak crash caused by imbalance in early memory reservation
  mm: memblock: update comments and kernel-doc
  memblock: split checks whether a region should be skipped to a helper function
  memblock: remove memblock_{set,clear}_region_flags
  memblock: drop memblock_alloc_*_nopanic() variants
  memblock: memblock_alloc_try_nid: don't panic
  treewide: add checks for the return value of memblock_alloc*()
  swiotlb: add checks for the return value of memblock_alloc*()
  init/main: add checks for the return value of memblock_alloc*()
  mm/percpu: add checks for the return value of memblock_alloc*()
  sparc: add checks for the return value of memblock_alloc*()
  ia64: add checks for the return value of memblock_alloc*()
  arch: don't memset(0) memory returned by memblock_alloc()
  ...
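For orientation before the diff: the arch/ hunks below all apply the same conversion, driven by "memblock: memblock_alloc_try_nid: don't panic"; the memblock allocators no longer panic internally, so each early-boot caller now checks the returned pointer and panics with its own descriptive message. A minimal sketch of that pattern, modeled on the early_alloc() change in arch/arm/mm/mmu.c (the helper name early_alloc_checked and its standalone form are illustrative only, not part of this patch):

	/* Assumes <linux/memblock.h>; mirrors the caller-side checks added below. */
	static void * __init early_alloc_checked(size_t size)
	{
		void *ptr = memblock_alloc(size, SMP_CACHE_BYTES);

		/* memblock_alloc() now returns NULL on failure instead of panicking. */
		if (!ptr)
			panic("%s: Failed to allocate %zu bytes\n", __func__, size);

		return ptr;
	}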
Diffstat (limited to 'arch')
-rw-r--r--arch/alpha/kernel/core_cia.c5
-rw-r--r--arch/alpha/kernel/core_marvel.c6
-rw-r--r--arch/alpha/kernel/pci-noop.c13
-rw-r--r--arch/alpha/kernel/pci.c11
-rw-r--r--arch/alpha/kernel/pci_iommu.c16
-rw-r--r--arch/alpha/kernel/setup.c2
-rw-r--r--arch/arc/kernel/unwind.c3
-rw-r--r--arch/arc/mm/highmem.c4
-rw-r--r--arch/arm/kernel/setup.c6
-rw-r--r--arch/arm/mm/init.c6
-rw-r--r--arch/arm/mm/mmu.c14
-rw-r--r--arch/arm64/kernel/setup.c8
-rw-r--r--arch/arm64/mm/kasan_init.c10
-rw-r--r--arch/arm64/mm/mmu.c2
-rw-r--r--arch/arm64/mm/numa.c4
-rw-r--r--arch/c6x/mm/dma-coherent.c4
-rw-r--r--arch/c6x/mm/init.c4
-rw-r--r--arch/csky/mm/highmem.c5
-rw-r--r--arch/h8300/mm/init.c4
-rw-r--r--arch/ia64/kernel/mca.c25
-rw-r--r--arch/ia64/mm/contig.c8
-rw-r--r--arch/ia64/mm/discontig.c4
-rw-r--r--arch/ia64/mm/init.c38
-rw-r--r--arch/ia64/mm/tlb.c6
-rw-r--r--arch/ia64/sn/kernel/io_common.c3
-rw-r--r--arch/ia64/sn/kernel/setup.c12
-rw-r--r--arch/m68k/atari/stram.c4
-rw-r--r--arch/m68k/mm/init.c3
-rw-r--r--arch/m68k/mm/mcfmmu.c7
-rw-r--r--arch/m68k/mm/motorola.c9
-rw-r--r--arch/m68k/mm/sun3mmu.c6
-rw-r--r--arch/m68k/sun3/sun3dvma.c3
-rw-r--r--arch/microblaze/mm/init.c10
-rw-r--r--arch/mips/cavium-octeon/dma-octeon.c3
-rw-r--r--arch/mips/kernel/setup.c3
-rw-r--r--arch/mips/kernel/traps.c5
-rw-r--r--arch/mips/mm/init.c5
-rw-r--r--arch/nds32/mm/init.c12
-rw-r--r--arch/openrisc/mm/init.c5
-rw-r--r--arch/openrisc/mm/ioremap.c8
-rw-r--r--arch/powerpc/kernel/dt_cpu_ftrs.c8
-rw-r--r--arch/powerpc/kernel/paca.c6
-rw-r--r--arch/powerpc/kernel/pci_32.c3
-rw-r--r--arch/powerpc/kernel/prom.c5
-rw-r--r--arch/powerpc/kernel/rtas.c6
-rw-r--r--arch/powerpc/kernel/setup-common.c3
-rw-r--r--arch/powerpc/kernel/setup_64.c4
-rw-r--r--arch/powerpc/lib/alloc.c3
-rw-r--r--arch/powerpc/mm/hash_utils_64.c11
-rw-r--r--arch/powerpc/mm/mmu_context_nohash.c9
-rw-r--r--arch/powerpc/mm/numa.c4
-rw-r--r--arch/powerpc/mm/pgtable-book3e.c12
-rw-r--r--arch/powerpc/mm/pgtable-book3s64.c3
-rw-r--r--arch/powerpc/mm/pgtable-radix.c9
-rw-r--r--arch/powerpc/mm/ppc_mmu_32.c3
-rw-r--r--arch/powerpc/platforms/pasemi/iommu.c3
-rw-r--r--arch/powerpc/platforms/powermac/nvram.c3
-rw-r--r--arch/powerpc/platforms/powernv/opal.c3
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c8
-rw-r--r--arch/powerpc/platforms/ps3/setup.c3
-rw-r--r--arch/powerpc/sysdev/dart_iommu.c3
-rw-r--r--arch/powerpc/sysdev/msi_bitmap.c3
-rw-r--r--arch/s390/kernel/crash_dump.c3
-rw-r--r--arch/s390/kernel/setup.c16
-rw-r--r--arch/s390/kernel/smp.c9
-rw-r--r--arch/s390/kernel/topology.c6
-rw-r--r--arch/s390/numa/mode_emu.c3
-rw-r--r--arch/s390/numa/numa.c6
-rw-r--r--arch/sh/boards/mach-ap325rxa/setup.c5
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c10
-rw-r--r--arch/sh/boards/mach-kfr2r09/setup.c5
-rw-r--r--arch/sh/boards/mach-migor/setup.c5
-rw-r--r--arch/sh/boards/mach-se/7724/setup.c10
-rw-r--r--arch/sh/kernel/machine_kexec.c3
-rw-r--r--arch/sh/mm/init.c8
-rw-r--r--arch/sh/mm/numa.c4
-rw-r--r--arch/sparc/kernel/prom_32.c6
-rw-r--r--arch/sparc/kernel/setup_64.c6
-rw-r--r--arch/sparc/kernel/smp_64.c12
-rw-r--r--arch/sparc/mm/init_32.c2
-rw-r--r--arch/sparc/mm/init_64.c11
-rw-r--r--arch/sparc/mm/srmmu.c18
-rw-r--r--arch/um/drivers/net_kern.c3
-rw-r--r--arch/um/drivers/vector_kern.c3
-rw-r--r--arch/um/kernel/initrd.c2
-rw-r--r--arch/um/kernel/mem.c16
-rw-r--r--arch/unicore32/kernel/setup.c4
-rw-r--r--arch/unicore32/mm/mmu.c15
-rw-r--r--arch/x86/kernel/acpi/boot.c3
-rw-r--r--arch/x86/kernel/apic/io_apic.c5
-rw-r--r--arch/x86/kernel/e820.c5
-rw-r--r--arch/x86/kernel/setup_percpu.c10
-rw-r--r--arch/x86/mm/kasan_init_64.c14
-rw-r--r--arch/x86/mm/numa.c12
-rw-r--r--arch/x86/platform/olpc/olpc_dt.c3
-rw-r--r--arch/x86/xen/p2m.c11
-rw-r--r--arch/xtensa/mm/kasan_init.c10
-rw-r--r--arch/xtensa/mm/mmu.c3
98 files changed, 586 insertions, 102 deletions
diff --git a/arch/alpha/kernel/core_cia.c b/arch/alpha/kernel/core_cia.c
index 867e8730b0c5..f489170201c3 100644
--- a/arch/alpha/kernel/core_cia.c
+++ b/arch/alpha/kernel/core_cia.c
@@ -331,7 +331,10 @@ cia_prepare_tbia_workaround(int window)
long i;
/* Use minimal 1K map. */
- ppte = memblock_alloc_from(CIA_BROKEN_TBIA_SIZE, 32768, 0);
+ ppte = memblock_alloc(CIA_BROKEN_TBIA_SIZE, 32768);
+ if (!ppte)
+ panic("%s: Failed to allocate %u bytes align=0x%x\n",
+ __func__, CIA_BROKEN_TBIA_SIZE, 32768);
pte = (virt_to_phys(ppte) >> (PAGE_SHIFT - 1)) | 1;
for (i = 0; i < CIA_BROKEN_TBIA_SIZE / sizeof(unsigned long); ++i)
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index c1d0c18c71ca..1db9d0eb2922 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -83,6 +83,9 @@ mk_resource_name(int pe, int port, char *str)
sprintf(tmp, "PCI %s PE %d PORT %d", str, pe, port);
name = memblock_alloc(strlen(tmp) + 1, SMP_CACHE_BYTES);
+ if (!name)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ strlen(tmp) + 1);
strcpy(name, tmp);
return name;
@@ -118,6 +121,9 @@ alloc_io7(unsigned int pe)
}
io7 = memblock_alloc(sizeof(*io7), SMP_CACHE_BYTES);
+ if (!io7)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(*io7));
io7->pe = pe;
raw_spin_lock_init(&io7->irq_lock);
diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c
index 091cff3c68fd..ae82061edae9 100644
--- a/arch/alpha/kernel/pci-noop.c
+++ b/arch/alpha/kernel/pci-noop.c
@@ -34,6 +34,9 @@ alloc_pci_controller(void)
struct pci_controller *hose;
hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES);
+ if (!hose)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(*hose));
*hose_tail = hose;
hose_tail = &hose->next;
@@ -44,7 +47,13 @@ alloc_pci_controller(void)
struct resource * __init
alloc_resource(void)
{
- return memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
+ void *ptr = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
+
+ if (!ptr)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(struct resource));
+
+ return ptr;
}
SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, bus,
@@ -54,7 +63,7 @@ SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, bus,
/* from hose or from bus.devfn */
if (which & IOBASE_FROM_HOSE) {
- for (hose = hose_head; hose; hose = hose->next)
+ for (hose = hose_head; hose; hose = hose->next)
if (hose->index == bus)
break;
if (!hose)
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index 97098127df83..64fbfb0763b2 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -393,6 +393,9 @@ alloc_pci_controller(void)
struct pci_controller *hose;
hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES);
+ if (!hose)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(*hose));
*hose_tail = hose;
hose_tail = &hose->next;
@@ -403,7 +406,13 @@ alloc_pci_controller(void)
struct resource * __init
alloc_resource(void)
{
- return memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
+ void *ptr = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
+
+ if (!ptr)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(struct resource));
+
+ return ptr;
}
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index aa0f50d0f823..3034d6d936d2 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -80,6 +80,9 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
" falling back to system-wide allocation\n",
__func__, nid);
arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
+ if (!arena)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(*arena));
}
arena->ptes = memblock_alloc_node(sizeof(*arena), align, nid);
@@ -87,13 +90,22 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
printk("%s: couldn't allocate arena ptes from node %d\n"
" falling back to system-wide allocation\n",
__func__, nid);
- arena->ptes = memblock_alloc_from(mem_size, align, 0);
+ arena->ptes = memblock_alloc(mem_size, align);
+ if (!arena->ptes)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, mem_size, align);
}
#else /* CONFIG_DISCONTIGMEM */
arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
- arena->ptes = memblock_alloc_from(mem_size, align, 0);
+ if (!arena)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(*arena));
+ arena->ptes = memblock_alloc(mem_size, align);
+ if (!arena->ptes)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, mem_size, align);
#endif /* CONFIG_DISCONTIGMEM */
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 4b5b1b244f86..5d4c76a77a9f 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -293,7 +293,7 @@ move_initrd(unsigned long mem_limit)
unsigned long size;
size = initrd_end - initrd_start;
- start = memblock_alloc_from(PAGE_ALIGN(size), PAGE_SIZE, 0);
+ start = memblock_alloc(PAGE_ALIGN(size), PAGE_SIZE);
if (!start || __pa(start) + size > mem_limit) {
initrd_start = initrd_end = 0;
return NULL;
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index d34f69eb1a95..271e9fafa479 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -181,8 +181,7 @@ static void init_unwind_hdr(struct unwind_table *table,
*/
static void *__init unw_hdr_alloc_early(unsigned long sz)
{
- return memblock_alloc_from_nopanic(sz, sizeof(unsigned int),
- MAX_DMA_ADDRESS);
+ return memblock_alloc_from(sz, sizeof(unsigned int), MAX_DMA_ADDRESS);
}
static void *unw_hdr_alloc(unsigned long sz)
diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c
index 48e700151810..11f57e2ced8a 100644
--- a/arch/arc/mm/highmem.c
+++ b/arch/arc/mm/highmem.c
@@ -124,6 +124,10 @@ static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
pmd_k = pmd_offset(pud_k, kvaddr);
pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+ if (!pte_k)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
+
pmd_populate_kernel(&init_mm, pmd_k, pte_k);
return pte_k;
}
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 375b13f7e780..5d78b6ac0429 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -867,6 +867,9 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
boot_alias_start = phys_to_idmap(start);
if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
+ if (!res)
+ panic("%s: Failed to allocate %zu bytes\n",
+ __func__, sizeof(*res));
res->name = "System RAM (boot alias)";
res->start = boot_alias_start;
res->end = phys_to_idmap(end);
@@ -875,6 +878,9 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
}
res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
+ if (!res)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(*res));
res->name = "System RAM";
res->start = start;
res->end = end;
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 478ea8b7db87..15dddfe43319 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -205,7 +205,11 @@ phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
BUG_ON(!arm_memblock_steal_permitted);
- phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
+ phys = memblock_phys_alloc(size, align);
+ if (!phys)
+ panic("Failed to steal %pa bytes at %pS\n",
+ &size, (void *)_RET_IP_);
+
memblock_free(phys, size);
memblock_remove(phys, size);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 57de0dde3ae0..f3ce34113f89 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -721,7 +721,13 @@ EXPORT_SYMBOL(phys_mem_access_prot);
static void __init *early_alloc(unsigned long sz)
{
- return memblock_alloc(sz, sz);
+ void *ptr = memblock_alloc(sz, sz);
+
+ if (!ptr)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, sz, sz);
+
+ return ptr;
}
static void *__init late_alloc(unsigned long sz)
@@ -994,6 +1000,9 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
return;
svm = memblock_alloc(sizeof(*svm) * nr, __alignof__(*svm));
+ if (!svm)
+ panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
+ __func__, sizeof(*svm) * nr, __alignof__(*svm));
for (md = io_desc; nr; md++, nr--) {
create_mapping(md);
@@ -1016,6 +1025,9 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
struct static_vm *svm;
svm = memblock_alloc(sizeof(*svm), __alignof__(*svm));
+ if (!svm)
+ panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
+ __func__, sizeof(*svm), __alignof__(*svm));
vm = &svm->vm;
vm->addr = (void *)addr;
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 834b321a88f8..f8482fe5a190 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -208,6 +208,7 @@ static void __init request_standard_resources(void)
struct memblock_region *region;
struct resource *res;
unsigned long i = 0;
+ size_t res_size;
kernel_code.start = __pa_symbol(_text);
kernel_code.end = __pa_symbol(__init_begin - 1);
@@ -215,9 +216,10 @@ static void __init request_standard_resources(void)
kernel_data.end = __pa_symbol(_end - 1);
num_standard_resources = memblock.memory.cnt;
- standard_resources = memblock_alloc_low(num_standard_resources *
- sizeof(*standard_resources),
- SMP_CACHE_BYTES);
+ res_size = num_standard_resources * sizeof(*standard_resources);
+ standard_resources = memblock_alloc_low(res_size, SMP_CACHE_BYTES);
+ if (!standard_resources)
+ panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);
for_each_memblock(memory, region) {
res = &standard_resources[i++];
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index f37a86d2a69d..296de39ddee5 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -40,6 +40,11 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node)
void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
__pa(MAX_DMA_ADDRESS),
MEMBLOCK_ALLOC_KASAN, node);
+ if (!p)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE, node,
+ __pa(MAX_DMA_ADDRESS));
+
return __pa(p);
}
@@ -48,6 +53,11 @@ static phys_addr_t __init kasan_alloc_raw_page(int node)
void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
__pa(MAX_DMA_ADDRESS),
MEMBLOCK_ALLOC_KASAN, node);
+ if (!p)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE, node,
+ __pa(MAX_DMA_ADDRESS));
+
return __pa(p);
}
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 402b6495ff58..e97f018ff740 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -103,6 +103,8 @@ static phys_addr_t __init early_pgtable_alloc(void)
void *ptr;
phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!phys)
+ panic("Failed to allocate page table page\n");
/*
* The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index 7a0a555b366a..06a6f264f2dd 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -237,6 +237,10 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
pr_info("Initmem setup node %d [<memory-less node>]\n", nid);
nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
+ if (!nd_pa)
+ panic("Cannot allocate %zu bytes for node %d data\n",
+ nd_size, nid);
+
nd = __va(nd_pa);
/* report and initialize */
diff --git a/arch/c6x/mm/dma-coherent.c b/arch/c6x/mm/dma-coherent.c
index 0be289839ce0..0d3701bc88f6 100644
--- a/arch/c6x/mm/dma-coherent.c
+++ b/arch/c6x/mm/dma-coherent.c
@@ -138,6 +138,10 @@ void __init coherent_mem_init(phys_addr_t start, u32 size)
dma_bitmap = memblock_alloc(BITS_TO_LONGS(dma_pages) * sizeof(long),
sizeof(long));
+ if (!dma_bitmap)
+ panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
+ __func__, BITS_TO_LONGS(dma_pages) * sizeof(long),
+ sizeof(long));
}
static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
diff --git a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c
index af5ada0520be..fe582c3a1794 100644
--- a/arch/c6x/mm/init.c
+++ b/arch/c6x/mm/init.c
@@ -40,7 +40,9 @@ void __init paging_init(void)
empty_zero_page = (unsigned long) memblock_alloc(PAGE_SIZE,
PAGE_SIZE);
- memset((void *)empty_zero_page, 0, PAGE_SIZE);
+ if (!empty_zero_page)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
/*
* Set up user data space
diff --git a/arch/csky/mm/highmem.c b/arch/csky/mm/highmem.c
index 53b1bfa4c462..3317b774f6dc 100644
--- a/arch/csky/mm/highmem.c
+++ b/arch/csky/mm/highmem.c
@@ -141,6 +141,11 @@ static void __init fixrange_init(unsigned long start, unsigned long end,
for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
if (pmd_none(*pmd)) {
pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate %lu bytes align=%lx\n",
+ __func__, PAGE_SIZE,
+ PAGE_SIZE);
+
set_pmd(pmd, __pmd(__pa(pte)));
BUG_ON(pte != pte_offset_kernel(pmd, 0));
}
diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c
index 6519252ac4db..0f04a5e9aa4f 100644
--- a/arch/h8300/mm/init.c
+++ b/arch/h8300/mm/init.c
@@ -68,7 +68,9 @@ void __init paging_init(void)
* to a couple of allocated pages.
*/
empty_zero_page = (unsigned long)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- memset((void *)empty_zero_page, 0, PAGE_SIZE);
+ if (!empty_zero_page)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
/*
* Set up SFC/DFC registers (user data space).
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 91bd1e129379..5cabb3fd159f 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -359,11 +359,6 @@ typedef struct ia64_state_log_s
static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
-#define IA64_LOG_ALLOCATE(it, size) \
- {ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
- (ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES); \
- ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
- (ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);}
#define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
#define IA64_LOG_LOCK(it) spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
#define IA64_LOG_UNLOCK(it) spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
@@ -378,6 +373,19 @@ static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
#define IA64_LOG_CURR_BUFFER(it) (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
#define IA64_LOG_COUNT(it) ia64_state_log[it].isl_count
+static inline void ia64_log_allocate(int it, u64 size)
+{
+ ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] =
+ (ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);
+ if (!ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)])
+ panic("%s: Failed to allocate %llu bytes\n", __func__, size);
+
+ ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] =
+ (ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);
+ if (!ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)])
+ panic("%s: Failed to allocate %llu bytes\n", __func__, size);
+}
+
/*
* ia64_log_init
* Reset the OS ia64 log buffer
@@ -399,9 +407,7 @@ ia64_log_init(int sal_info_type)
return;
// set up OS data structures to hold error info
- IA64_LOG_ALLOCATE(sal_info_type, max_size);
- memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
- memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
+ ia64_log_allocate(sal_info_type, max_size);
}
/*
@@ -1835,8 +1841,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
/* Caller prevents this from being called after init */
static void * __ref mca_bootmem(void)
{
- return memblock_alloc_from(sizeof(struct ia64_mca_cpu),
- KERNEL_STACK_SIZE, 0);
+ return memblock_alloc(sizeof(struct ia64_mca_cpu), KERNEL_STACK_SIZE);
}
/* Do per-CPU MCA-related initialization. */
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 6e447234205c..d29fb6b9fa33 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -84,9 +84,13 @@ skip:
static inline void
alloc_per_cpu_data(void)
{
- cpu_data = memblock_alloc_from(PERCPU_PAGE_SIZE * num_possible_cpus(),
- PERCPU_PAGE_SIZE,
+ size_t size = PERCPU_PAGE_SIZE * num_possible_cpus();
+
+ cpu_data = memblock_alloc_from(size, PERCPU_PAGE_SIZE,
__pa(MAX_DMA_ADDRESS));
+ if (!cpu_data)
+ panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
+ __func__, size, PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}
/**
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index f9c36750c6a4..05490dd073e6 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -454,6 +454,10 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
__pa(MAX_DMA_ADDRESS),
MEMBLOCK_ALLOC_ACCESSIBLE,
bestnode);
+ if (!ptr)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%lx\n",
+ __func__, pernodesize, PERCPU_PAGE_SIZE, bestnode,
+ __pa(MAX_DMA_ADDRESS));
return ptr;
}
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 29d841525ca1..e49200e31750 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -444,23 +444,45 @@ int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
for (address = start_page; address < end_page; address += PAGE_SIZE) {
pgd = pgd_offset_k(address);
- if (pgd_none(*pgd))
- pgd_populate(&init_mm, pgd, memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node));
+ if (pgd_none(*pgd)) {
+ pud = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+ if (!pud)
+ goto err_alloc;
+ pgd_populate(&init_mm, pgd, pud);
+ }
pud = pud_offset(pgd, address);
- if (pud_none(*pud))
- pud_populate(&init_mm, pud, memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node));
+ if (pud_none(*pud)) {
+ pmd = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+ if (!pmd)
+ goto err_alloc;
+ pud_populate(&init_mm, pud, pmd);
+ }
pmd = pmd_offset(pud, address);
- if (pmd_none(*pmd))
- pmd_populate_kernel(&init_mm, pmd, memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node));
+ if (pmd_none(*pmd)) {
+ pte = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+ if (!pte)
+ goto err_alloc;
+ pmd_populate_kernel(&init_mm, pmd, pte);
+ }
pte = pte_offset_kernel(pmd, address);
- if (pte_none(*pte))
- set_pte(pte, pfn_pte(__pa(memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node)) >> PAGE_SHIFT,
+ if (pte_none(*pte)) {
+ void *page = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE,
+ node);
+ if (!page)
+ goto err_alloc;
+ set_pte(pte, pfn_pte(__pa(page) >> PAGE_SHIFT,
PAGE_KERNEL));
+ }
}
return 0;
+
+err_alloc:
+ panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d\n",
+ __func__, PAGE_SIZE, PAGE_SIZE, node);
+ return -ENOMEM;
}
struct memmap_init_callback_data {
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index 9340bcb4f29c..5fc89aabdce1 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -61,8 +61,14 @@ mmu_context_init (void)
{
ia64_ctx.bitmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
SMP_CACHE_BYTES);
+ if (!ia64_ctx.bitmap)
+ panic("%s: Failed to allocate %u bytes\n", __func__,
+ (ia64_ctx.max_ctx + 1) >> 3);
ia64_ctx.flushmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
SMP_CACHE_BYTES);
+ if (!ia64_ctx.flushmap)
+ panic("%s: Failed to allocate %u bytes\n", __func__,
+ (ia64_ctx.max_ctx + 1) >> 3);
}
/*
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
index 8df13d0d96fa..d46847323ef6 100644
--- a/arch/ia64/sn/kernel/io_common.c
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -394,6 +394,9 @@ void __init hubdev_init_node(nodepda_t * npda, cnodeid_t node)
hubdev_info = (struct hubdev_info *)memblock_alloc_node(size,
SMP_CACHE_BYTES,
node);
+ if (!hubdev_info)
+ panic("%s: Failed to allocate %d bytes align=0x%x nid=%d\n",
+ __func__, size, SMP_CACHE_BYTES, node);
npda->pdinfo = (void *)hubdev_info;
}
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index a6d40a2c5bff..e6a5049ef503 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -513,6 +513,10 @@ static void __init sn_init_pdas(char **cmdline_p)
nodepdaindr[cnode] =
memblock_alloc_node(sizeof(nodepda_t), SMP_CACHE_BYTES,
cnode);
+ if (!nodepdaindr[cnode])
+ panic("%s: Failed to allocate %lu bytes align=0x%x nid=%d\n",
+ __func__, sizeof(nodepda_t), SMP_CACHE_BYTES,
+ cnode);
memset(nodepdaindr[cnode]->phys_cpuid, -1,
sizeof(nodepdaindr[cnode]->phys_cpuid));
spin_lock_init(&nodepdaindr[cnode]->ptc_lock);
@@ -521,9 +525,15 @@ static void __init sn_init_pdas(char **cmdline_p)
/*
* Allocate & initialize nodepda for TIOs. For now, put them on node 0.
*/
- for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++)
+ for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++) {
nodepdaindr[cnode] =
memblock_alloc_node(sizeof(nodepda_t), SMP_CACHE_BYTES, 0);
+ if (!nodepdaindr[cnode])
+ panic("%s: Failed to allocate %lu bytes align=0x%x nid=%d\n",
+ __func__, sizeof(nodepda_t), SMP_CACHE_BYTES,
+ cnode);
+ }
+
/*
* Now copy the array of nodepda pointers to each nodepda.
diff --git a/arch/m68k/atari/stram.c b/arch/m68k/atari/stram.c
index 6ffc204eb07d..6152f9f631d2 100644
--- a/arch/m68k/atari/stram.c
+++ b/arch/m68k/atari/stram.c
@@ -97,6 +97,10 @@ void __init atari_stram_reserve_pages(void *start_mem)
pr_debug("atari_stram pool: kernel in ST-RAM, using alloc_bootmem!\n");
stram_pool.start = (resource_size_t)memblock_alloc_low(pool_size,
PAGE_SIZE);
+ if (!stram_pool.start)
+ panic("%s: Failed to allocate %lu bytes align=%lx\n",
+ __func__, pool_size, PAGE_SIZE);
+
stram_pool.end = stram_pool.start + pool_size - 1;
request_resource(&iomem_resource, &stram_pool);
stram_virt_offset = 0;
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 933c33e76a48..8868a4c9adae 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -94,6 +94,9 @@ void __init paging_init(void)
high_memory = (void *) end_mem;
empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!empty_zero_page)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
/*
* Set up SFC/DFC registers (user data space).
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index 0de4999a3810..6cb1e41d58d0 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -44,7 +44,9 @@ void __init paging_init(void)
int i;
empty_zero_page = (void *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- memset((void *) empty_zero_page, 0, PAGE_SIZE);
+ if (!empty_zero_page)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
pg_dir = swapper_pg_dir;
memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
@@ -52,6 +54,9 @@ void __init paging_init(void)
size = num_pages * sizeof(pte_t);
size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
next_pgtable = (unsigned long) memblock_alloc(size, PAGE_SIZE);
+ if (!next_pgtable)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, size, PAGE_SIZE);
bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 3f3d0bf36091..356601bf96d9 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -55,6 +55,9 @@ static pte_t * __init kernel_page_table(void)
pte_t *ptablep;
ptablep = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+ if (!ptablep)
+ panic("%s: Failed to allocate %lu bytes align=%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
clear_page(ptablep);
__flush_page_to_ram(ptablep);
@@ -96,6 +99,9 @@ static pmd_t * __init kernel_ptr_table(void)
if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
last_pgtable = (pmd_t *)memblock_alloc_low(PAGE_SIZE,
PAGE_SIZE);
+ if (!last_pgtable)
+ panic("%s: Failed to allocate %lu bytes align=%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
clear_page(last_pgtable);
__flush_page_to_ram(last_pgtable);
@@ -278,6 +284,9 @@ void __init paging_init(void)
* to a couple of allocated pages
*/
empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!empty_zero_page)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
/*
* Set up SFC/DFC registers
diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c
index f736db48a2e1..eca1c46bb90a 100644
--- a/arch/m68k/mm/sun3mmu.c
+++ b/arch/m68k/mm/sun3mmu.c
@@ -46,6 +46,9 @@ void __init paging_init(void)
unsigned long size;
empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!empty_zero_page)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
address = PAGE_OFFSET;
pg_dir = swapper_pg_dir;
@@ -56,6 +59,9 @@ void __init paging_init(void)
size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
next_pgtable = (unsigned long)memblock_alloc(size, PAGE_SIZE);
+ if (!next_pgtable)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, size, PAGE_SIZE);
bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
/* Map whole memory from PAGE_OFFSET (0x0E000000) */
diff --git a/arch/m68k/sun3/sun3dvma.c b/arch/m68k/sun3/sun3dvma.c
index 4d64711d3d47..399f3d06125f 100644
--- a/arch/m68k/sun3/sun3dvma.c
+++ b/arch/m68k/sun3/sun3dvma.c
@@ -269,6 +269,9 @@ void __init dvma_init(void)
iommu_use = memblock_alloc(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long),
SMP_CACHE_BYTES);
+ if (!iommu_use)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ IOMMU_TOTAL_ENTRIES * sizeof(unsigned long));
dvma_unmap_iommu(DVMA_START, DVMA_SIZE);
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 44f4b8910c21..7e97d44f6538 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -374,12 +374,14 @@ void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
{
void *p;
- if (mem_init_done)
+ if (mem_init_done) {
p = kzalloc(size, mask);
- else {
+ } else {
p = memblock_alloc(size, SMP_CACHE_BYTES);
- if (p)
- memset(p, 0, size);
+ if (!p)
+ panic("%s: Failed to allocate %zu bytes\n",
+ __func__, size);
}
+
return p;
}
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index e8eb60ed99f2..11d5a4e90736 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -245,6 +245,9 @@ void __init plat_swiotlb_setup(void)
swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT;
octeon_swiotlb = memblock_alloc_low(swiotlbsize, PAGE_SIZE);
+ if (!octeon_swiotlb)
+ panic("%s: Failed to allocate %zu bytes align=%lx\n",
+ __func__, swiotlbsize, PAGE_SIZE);
if (swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1) == -ENOMEM)
panic("Cannot allocate SWIOTLB buffer");
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 5151532ad959..8d1dc6c71173 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -919,6 +919,9 @@ static void __init resource_init(void)
end = HIGHMEM_START - 1;
res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
+ if (!res)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(struct resource));
res->start = start;
res->end = end;
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 42d411125690..98ca55d62201 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -2293,7 +2293,10 @@ void __init trap_init(void)
phys_addr_t ebase_pa;
ebase = (unsigned long)
- memblock_alloc_from(size, 1 << fls(size), 0);
+ memblock_alloc(size, 1 << fls(size));
+ if (!ebase)
+ panic("%s: Failed to allocate %lu bytes align=0x%x\n",
+ __func__, size, 1 << fls(size));
/*
* Try to ensure ebase resides in KSeg0 if possible.
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index c3b45e248806..bbb196ad5f26 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -252,6 +252,11 @@ void __init fixrange_init(unsigned long start, unsigned long end,
if (pmd_none(*pmd)) {
pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate %lu bytes align=%lx\n",
+ __func__, PAGE_SIZE,
+ PAGE_SIZE);
+
set_pmd(pmd, __pmd((unsigned long)pte));
BUG_ON(pte != pte_offset_kernel(pmd, 0));
}
diff --git a/arch/nds32/mm/init.c b/arch/nds32/mm/init.c
index d1e521cce317..1d03633f89a9 100644
--- a/arch/nds32/mm/init.c
+++ b/arch/nds32/mm/init.c
@@ -79,6 +79,9 @@ static void __init map_ram(void)
/* Alloc one page for holding PTE's... */
pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
set_pmd(pme, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE));
/* Fill the newly allocated page with PTE'S */
@@ -111,6 +114,9 @@ static void __init fixedrange_init(void)
pud = pud_offset(pgd, vaddr);
pmd = pmd_offset(pud, vaddr);
fixmap_pmd_p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!fixmap_pmd_p)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
set_pmd(pmd, __pmd(__pa(fixmap_pmd_p) + _PAGE_KERNEL_TABLE));
#ifdef CONFIG_HIGHMEM
@@ -123,6 +129,9 @@ static void __init fixedrange_init(void)
pud = pud_offset(pgd, vaddr);
pmd = pmd_offset(pud, vaddr);
pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
set_pmd(pmd, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE));
pkmap_page_table = pte;
#endif /* CONFIG_HIGHMEM */
@@ -148,6 +157,9 @@ void __init paging_init(void)
/* allocate space for empty_zero_page */
zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!zero_page)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
zone_sizes_init();
empty_zero_page = virt_to_page(zero_page);
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index d157310eb377..caeb4184e8a6 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -105,7 +105,10 @@ static void __init map_ram(void)
}
/* Alloc one page for holding PTE's... */
- pte = (pte_t *) __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE));
+ pte = memblock_alloc_raw(PAGE_SIZE, PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate page for PTEs\n",
+ __func__);
set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));
/* Fill the newly allocated page with PTE'S */
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
index 051bcb4fefd3..a8509950dbbc 100644
--- a/arch/openrisc/mm/ioremap.c
+++ b/arch/openrisc/mm/ioremap.c
@@ -122,10 +122,14 @@ pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm)
{
pte_t *pte;
- if (likely(mem_init_done))
+ if (likely(mem_init_done)) {
pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
- else
+ } else {
pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
+ }
return pte;
}
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index e49bd5efcfe6..c66fd3ce6478 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -810,7 +810,6 @@ static int __init process_cpufeatures_node(unsigned long node,
int len;
f = &dt_cpu_features[i];
- memset(f, 0, sizeof(struct dt_cpu_feature));
f->node = node;
@@ -1005,7 +1004,12 @@ static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
/* Count and allocate space for cpu features */
of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes,
&nr_dt_cpu_features);
- dt_cpu_features = __va(memblock_phys_alloc(sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, PAGE_SIZE));
+ dt_cpu_features = memblock_alloc(sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, PAGE_SIZE);
+ if (!dt_cpu_features)
+ panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
+ __func__,
+ sizeof(struct dt_cpu_feature) * nr_dt_cpu_features,
+ PAGE_SIZE);
cpufeatures_setup_start(isa);
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 8c890c6557ed..e7382abee868 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -196,7 +196,11 @@ void __init allocate_paca_ptrs(void)
paca_nr_cpu_ids = nr_cpu_ids;
paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
- paca_ptrs = __va(memblock_phys_alloc(paca_ptrs_size, SMP_CACHE_BYTES));
+ paca_ptrs = memblock_alloc_raw(paca_ptrs_size, SMP_CACHE_BYTES);
+ if (!paca_ptrs)
+ panic("Failed to allocate %d bytes for paca pointers\n",
+ paca_ptrs_size);
+
memset(paca_ptrs, 0x88, paca_ptrs_size);
}
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index d3f04f2d8249..0417fda13636 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -205,6 +205,9 @@ pci_create_OF_bus_map(void)
of_prop = memblock_alloc(sizeof(struct property) + 256,
SMP_CACHE_BYTES);
+ if (!of_prop)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(struct property) + 256);
dn = of_find_node_by_path("/");
if (dn) {
memset(of_prop, -1, sizeof(struct property) + 256);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 4181ec715f88..4221527b082f 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -126,7 +126,10 @@ static void __init move_device_tree(void)
if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) ||
!memblock_is_memory(start + size - 1) ||
overlaps_crashkernel(start, size) || overlaps_initrd(start, size)) {
- p = __va(memblock_phys_alloc(size, PAGE_SIZE));
+ p = memblock_alloc_raw(size, PAGE_SIZE);
+ if (!p)
+ panic("Failed to allocate %lu bytes to move device tree\n",
+ size);
memcpy(p, initial_boot_params, size);
initial_boot_params = p;
DBG("Moved device tree to 0x%px\n", p);
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index de35bd8f047f..fbc676160adf 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -1187,7 +1187,11 @@ void __init rtas_initialize(void)
ibm_suspend_me_token = rtas_token("ibm,suspend-me");
}
#endif
- rtas_rmo_buf = memblock_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);
+ rtas_rmo_buf = memblock_phys_alloc_range(RTAS_RMOBUF_MAX, PAGE_SIZE,
+ 0, rtas_region);
+ if (!rtas_rmo_buf)
+ panic("ERROR: RTAS: Failed to allocate %lx bytes below %pa\n",
+ PAGE_SIZE, &rtas_region);
#ifdef CONFIG_RTAS_ERROR_LOGGING
rtas_last_error_token = rtas_token("rtas-last-error");
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index f17868e19e2c..2e5dfb6e0823 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -461,6 +461,9 @@ void __init smp_setup_cpu_maps(void)
cpu_to_phys_id = memblock_alloc(nr_cpu_ids * sizeof(u32),
__alignof__(u32));
+ if (!cpu_to_phys_id)
+ panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
+ __func__, nr_cpu_ids * sizeof(u32), __alignof__(u32));
for_each_node_by_type(dn, "cpu") {
const __be32 *intserv;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index ff0aac42bb33..ba404dd9ce1d 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -905,6 +905,10 @@ static void __ref init_fallback_flush(void)
l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2,
l1d_size, MEMBLOCK_LOW_LIMIT,
limit, NUMA_NO_NODE);
+ if (!l1d_flush_fallback_area)
+ panic("%s: Failed to allocate %llu bytes align=0x%llx max_addr=%pa\n",
+ __func__, l1d_size * 2, l1d_size, &limit);
+
for_each_possible_cpu(cpu) {
struct paca_struct *paca = paca_ptrs[cpu];
diff --git a/arch/powerpc/lib/alloc.c b/arch/powerpc/lib/alloc.c
index dedf88a76f58..ce180870bd52 100644
--- a/arch/powerpc/lib/alloc.c
+++ b/arch/powerpc/lib/alloc.c
@@ -15,6 +15,9 @@ void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
p = kzalloc(size, mask);
else {
p = memblock_alloc(size, SMP_CACHE_BYTES);
+ if (!p)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ size);
}
return p;
}
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 3d4b2399192f..0a4f939a8161 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -882,8 +882,12 @@ static void __init htab_initialize(void)
}
#endif /* CONFIG_PPC_CELL */
- table = memblock_alloc_base(htab_size_bytes, htab_size_bytes,
- limit);
+ table = memblock_phys_alloc_range(htab_size_bytes,
+ htab_size_bytes,
+ 0, limit);
+ if (!table)
+ panic("ERROR: Failed to allocate %pa bytes below %pa\n",
+ &htab_size_bytes, &limit);
DBG("Hash table allocated at %lx, size: %lx\n", table,
htab_size_bytes);
@@ -911,6 +915,9 @@ static void __init htab_initialize(void)
linear_map_hash_slots = memblock_alloc_try_nid(
linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT,
ppc64_rma_size, NUMA_NO_NODE);
+ if (!linear_map_hash_slots)
+ panic("%s: Failed to allocate %lu bytes max_addr=%pa\n",
+ __func__, linear_map_hash_count, &ppc64_rma_size);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 22d71a58167f..1945c5f19f5e 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -461,10 +461,19 @@ void __init mmu_context_init(void)
* Allocate the maps used by context management
*/
context_map = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
+ if (!context_map)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ CTX_MAP_SIZE);
context_mm = memblock_alloc(sizeof(void *) * (LAST_CONTEXT + 1),
SMP_CACHE_BYTES);
+ if (!context_mm)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(void *) * (LAST_CONTEXT + 1));
#ifdef CONFIG_SMP
stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
+ if (!stale_map[boot_cpuid])
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ CTX_MAP_SIZE);
cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE,
"powerpc/mmu/ctx:prepare",
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index ac49e4158e50..f976676004ad 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -788,6 +788,10 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
int tnid;
nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
+ if (!nd_pa)
+ panic("Cannot allocate %zu bytes for node %d data\n",
+ nd_size, nid);
+
nd = __va(nd_pa);
/* report and initialize */
diff --git a/arch/powerpc/mm/pgtable-book3e.c b/arch/powerpc/mm/pgtable-book3e.c
index 53cbc7dc2df2..1032ef7aaf62 100644
--- a/arch/powerpc/mm/pgtable-book3e.c
+++ b/arch/powerpc/mm/pgtable-book3e.c
@@ -57,8 +57,16 @@ void vmemmap_remove_mapping(unsigned long start,
static __ref void *early_alloc_pgtable(unsigned long size)
{
- return memblock_alloc_try_nid(size, size, MEMBLOCK_LOW_LIMIT,
- __pa(MAX_DMA_ADDRESS), NUMA_NO_NODE);
+ void *ptr;
+
+ ptr = memblock_alloc_try_nid(size, size, MEMBLOCK_LOW_LIMIT,
+ __pa(MAX_DMA_ADDRESS), NUMA_NO_NODE);
+
+ if (!ptr)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx max_addr=%lx\n",
+ __func__, size, size, __pa(MAX_DMA_ADDRESS));
+
+ return ptr;
}
/*
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index 92a3e4c39540..a4341aba0af4 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -197,6 +197,9 @@ void __init mmu_partition_table_init(void)
BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
/* Initialize the Partition Table with no entries */
partition_tb = memblock_alloc(patb_size, patb_size);
+ if (!partition_tb)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, patb_size, patb_size);
/*
* update partition table control register,
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index e377684ac6ad..154472a28c77 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -53,13 +53,20 @@ static __ref void *early_alloc_pgtable(unsigned long size, int nid,
{
phys_addr_t min_addr = MEMBLOCK_LOW_LIMIT;
phys_addr_t max_addr = MEMBLOCK_ALLOC_ANYWHERE;
+ void *ptr;
if (region_start)
min_addr = region_start;
if (region_end)
max_addr = region_end;
- return memblock_alloc_try_nid(size, size, min_addr, max_addr, nid);
+ ptr = memblock_alloc_try_nid(size, size, min_addr, max_addr, nid);
+
+ if (!ptr)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa max_addr=%pa\n",
+ __func__, size, size, nid, &min_addr, &max_addr);
+
+ return ptr;
}
static int early_map_kernel_page(unsigned long ea, unsigned long pa,
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index 6c8a60b1e31d..f29d2f118b44 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -340,6 +340,9 @@ void __init MMU_init_hw(void)
*/
if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
Hash = memblock_alloc(Hash_size, Hash_size);
+ if (!Hash)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, Hash_size, Hash_size);
_SDR1 = __pa(Hash) | SDR1_LOW_BITS;
Hash_end = (struct hash_pte *) ((unsigned long)Hash + Hash_size);
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 86368e238f6e..044c6089462c 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -211,6 +211,9 @@ static int __init iob_init(struct device_node *dn)
iob_l2_base = memblock_alloc_try_nid_raw(1UL << 21, 1UL << 21,
MEMBLOCK_LOW_LIMIT, 0x80000000,
NUMA_NO_NODE);
+ if (!iob_l2_base)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx max_addr=%x\n",
+ __func__, 1UL << 21, 1UL << 21, 0x80000000);
pr_info("IOBMAP L2 allocated at: %p\n", iob_l2_base);
diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c
index 9360cdc408c1..86989c5779c2 100644
--- a/arch/powerpc/platforms/powermac/nvram.c
+++ b/arch/powerpc/platforms/powermac/nvram.c
@@ -519,6 +519,9 @@ static int __init core99_nvram_setup(struct device_node *dp, unsigned long addr)
return -EINVAL;
}
nvram_image = memblock_alloc(NVRAM_SIZE, SMP_CACHE_BYTES);
+ if (!nvram_image)
+ panic("%s: Failed to allocate %u bytes\n", __func__,
+ NVRAM_SIZE);
nvram_data = ioremap(addr, NVRAM_SIZE*2);
nvram_naddrs = 1; /* Make sure we get the correct case */
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 727a7de08635..2b0eca104f86 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -171,6 +171,9 @@ int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
* Allocate a buffer to hold the MC recoverable ranges.
*/
mc_recoverable_range = memblock_alloc(size, __alignof__(u64));
+ if (!mc_recoverable_range)
+ panic("%s: Failed to allocate %u bytes align=0x%lx\n",
+ __func__, size, __alignof__(u64));
for (i = 0; i < mc_recoverable_range_len; i++) {
mc_recoverable_range[i].start_addr =
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index fa6af52b5219..3ead4c237ed0 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -3657,6 +3657,9 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
pr_debug(" PHB-ID : 0x%016llx\n", phb_id);
phb = memblock_alloc(sizeof(*phb), SMP_CACHE_BYTES);
+ if (!phb)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(*phb));
/* Allocate PCI controller */
phb->hose = hose = pcibios_alloc_controller(np);
@@ -3703,6 +3706,9 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
phb->diag_data_size = PNV_PCI_DIAG_BUF_SIZE;
phb->diag_data = memblock_alloc(phb->diag_data_size, SMP_CACHE_BYTES);
+ if (!phb->diag_data)
+ panic("%s: Failed to allocate %u bytes\n", __func__,
+ phb->diag_data_size);
/* Parse 32-bit and IO ranges (if any) */
pci_process_bridge_OF_ranges(hose, np, !hose->global_number);
@@ -3762,6 +3768,8 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
pemap_off = size;
size += phb->ioda.total_pe_num * sizeof(struct pnv_ioda_pe);
aux = memblock_alloc(size, SMP_CACHE_BYTES);
+ if (!aux)
+ panic("%s: Failed to allocate %lu bytes\n", __func__, size);
phb->ioda.pe_alloc = aux;
phb->ioda.m64_segmap = aux + m64map_off;
phb->ioda.m32_segmap = aux + m32map_off;
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index 658bfab3350b..4ce5458eb0f8 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -127,6 +127,9 @@ static void __init prealloc(struct ps3_prealloc *p)
return;
p->address = memblock_alloc(p->size, p->align);
+ if (!p->address)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, p->size, p->align);
printk(KERN_INFO "%s: %lu bytes at %p\n", p->name, p->size,
p->address);
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index fc5c5c23303e..2a751795ec1e 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -265,6 +265,9 @@ static void allocate_dart(void)
* prefetching into invalid pages and corrupting data
*/
tmp = memblock_phys_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE);
+ if (!tmp)
+ panic("DART: table allocation failed\n");
+
dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) &
DARTMAP_RPNMASK);
diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c
index d45450f6666a..51a679a1c403 100644
--- a/arch/powerpc/sysdev/msi_bitmap.c
+++ b/arch/powerpc/sysdev/msi_bitmap.c
@@ -129,6 +129,9 @@ int __ref msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count,
bmp->bitmap = kzalloc(size, GFP_KERNEL);
else {
bmp->bitmap = memblock_alloc(size, SMP_CACHE_BYTES);
+ if (!bmp->bitmap)
+ panic("%s: Failed to allocate %u bytes\n", __func__,
+ size);
/* the bitmap won't be freed from memblock allocator */
kmemleak_not_leak(bmp->bitmap);
}
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 97eae3871868..f96a5857bbfd 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -61,6 +61,9 @@ struct save_area * __init save_area_alloc(bool is_boot_cpu)
struct save_area *sa;
sa = (void *) memblock_phys_alloc(sizeof(*sa), 8);
+ if (!sa)
+ panic("Failed to allocate save area\n");
+
if (is_boot_cpu)
list_add(&sa->list, &dump_save_areas);
else
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 12934e8fbb91..2c642af526ce 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -378,6 +378,10 @@ static void __init setup_lowcore_dat_off(void)
*/
BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
+ if (!lc)
+ panic("%s: Failed to allocate %zu bytes align=%zx\n",
+ __func__, sizeof(*lc), sizeof(*lc));
+
lc->restart_psw.mask = PSW_KERNEL_BITS;
lc->restart_psw.addr = (unsigned long) restart_int_handler;
lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
@@ -419,6 +423,9 @@ static void __init setup_lowcore_dat_off(void)
* all CPUs in cast *one* of them does a PSW restart.
*/
restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
+ if (!restart_stack)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, THREAD_SIZE, THREAD_SIZE);
restart_stack += STACK_INIT_OFFSET;
/*
@@ -495,6 +502,9 @@ static void __init setup_resources(void)
for_each_memblock(memory, reg) {
res = memblock_alloc(sizeof(*res), 8);
+ if (!res)
+ panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+ __func__, sizeof(*res), 8);
res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
res->name = "System RAM";
@@ -509,6 +519,9 @@ static void __init setup_resources(void)
continue;
if (std_res->end > res->end) {
sub_res = memblock_alloc(sizeof(*sub_res), 8);
+ if (!sub_res)
+ panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+ __func__, sizeof(*sub_res), 8);
*sub_res = *std_res;
sub_res->end = res->end;
std_res->start = res->end + 1;
@@ -966,6 +979,9 @@ static void __init setup_randomness(void)
vmms = (struct sysinfo_3_2_2 *) memblock_phys_alloc(PAGE_SIZE,
PAGE_SIZE);
+ if (!vmms)
+ panic("Failed to allocate memory for sysinfo structure\n");
+
if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
memblock_free((unsigned long) vmms, PAGE_SIZE);
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index b198ece2aad6..3fe1c77c361b 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -656,7 +656,11 @@ void __init smp_save_dump_cpus(void)
/* No previous system present, normal boot. */
return;
/* Allocate a page as dumping area for the store status sigps */
- page = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, 1UL << 31);
+ page = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0, 1UL << 31);
+ if (!page)
+ panic("ERROR: Failed to allocate %lx bytes below %lx\n",
+ PAGE_SIZE, 1UL << 31);
+
/* Set multi-threading state to the previous system. */
pcpu_set_smt(sclp.mtid_prev);
boot_cpu_addr = stap();
@@ -766,6 +770,9 @@ void __init smp_detect_cpus(void)
/* Get CPU information */
info = memblock_alloc(sizeof(*info), 8);
+ if (!info)
+ panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+ __func__, sizeof(*info), 8);
smp_get_core_info(info, 1);
/* Find boot CPU type */
if (sclp.has_core_type) {
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 8992b04c0ade..8964a3f60aad 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -520,6 +520,9 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info,
nr_masks = max(nr_masks, 1);
for (i = 0; i < nr_masks; i++) {
mask->next = memblock_alloc(sizeof(*mask->next), 8);
+ if (!mask->next)
+ panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+ __func__, sizeof(*mask->next), 8);
mask = mask->next;
}
}
@@ -538,6 +541,9 @@ void __init topology_init_early(void)
if (!MACHINE_HAS_TOPOLOGY)
goto out;
tl_info = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!tl_info)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
info = tl_info;
store_topology(info);
pr_info("The CPU configuration topology of the machine is: %d %d %d %d %d %d / %d\n",
diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c
index bfba273c32c0..71a12a4f4906 100644
--- a/arch/s390/numa/mode_emu.c
+++ b/arch/s390/numa/mode_emu.c
@@ -313,6 +313,9 @@ static void __ref create_core_to_node_map(void)
int i;
emu_cores = memblock_alloc(sizeof(*emu_cores), 8);
+ if (!emu_cores)
+ panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+ __func__, sizeof(*emu_cores), 8);
for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++)
emu_cores->to_node_id[i] = NODE_ID_FREE;
}
diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
index 2d1271e2a70d..8eb9e9743f5d 100644
--- a/arch/s390/numa/numa.c
+++ b/arch/s390/numa/numa.c
@@ -92,8 +92,12 @@ static void __init numa_setup_memory(void)
} while (cur_base < end_of_dram);
/* Allocate and fill out node_data */
- for (nid = 0; nid < MAX_NUMNODES; nid++)
+ for (nid = 0; nid < MAX_NUMNODES; nid++) {
NODE_DATA(nid) = memblock_alloc(sizeof(pg_data_t), 8);
+ if (!NODE_DATA(nid))
+ panic("%s: Failed to allocate %zu bytes align=0x%x\n",
+ __func__, sizeof(pg_data_t), 8);
+ }
for_each_online_node(nid) {
unsigned long start_pfn, end_pfn;
diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c
index 7899b4f51fdd..8301a4378f50 100644
--- a/arch/sh/boards/mach-ap325rxa/setup.c
+++ b/arch/sh/boards/mach-ap325rxa/setup.c
@@ -556,7 +556,10 @@ static void __init ap325rxa_mv_mem_reserve(void)
phys_addr_t phys;
phys_addr_t size = CEU_BUFFER_MEMORY_SIZE;
- phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+ phys = memblock_phys_alloc(size, PAGE_SIZE);
+ if (!phys)
+ panic("Failed to allocate CEU memory\n");
+
memblock_free(phys, size);
memblock_remove(phys, size);
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 3c59019c4695..34e5414c5563 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -1476,12 +1476,18 @@ static void __init ecovec_mv_mem_reserve(void)
phys_addr_t phys;
phys_addr_t size = CEU_BUFFER_MEMORY_SIZE;
- phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+ phys = memblock_phys_alloc(size, PAGE_SIZE);
+ if (!phys)
+ panic("Failed to allocate CEU0 memory\n");
+
memblock_free(phys, size);
memblock_remove(phys, size);
ceu0_dma_membase = phys;
- phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+ phys = memblock_phys_alloc(size, PAGE_SIZE);
+ if (!phys)
+ panic("Failed to allocate CEU1 memory\n");
+
memblock_free(phys, size);
memblock_remove(phys, size);
ceu1_dma_membase = phys;
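
The five SuperH board files repeat one idiom: allocate a physically contiguous CEU buffer, then immediately free and remove it from memblock so it never reaches the page allocator and can be handed to the CEU driver as a DMA area. Only the allocator call changes, plus the new failure check. A condensed sketch of the idiom, with ceu_mem_reserve and ceu_dma_membase standing in for the board-specific names:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>

static phys_addr_t ceu_dma_membase;	/* board-specific in the real files */

static void __init ceu_mem_reserve(phys_addr_t size)
{
	phys_addr_t phys;

	/* memblock_phys_alloc() replaces memblock_alloc_base(); it may return 0. */
	phys = memblock_phys_alloc(size, PAGE_SIZE);
	if (!phys)
		panic("Failed to allocate CEU memory\n");

	/*
	 * Return the range to memblock bookkeeping and then remove it, so
	 * the kernel never maps it; the driver uses it directly for DMA.
	 */
	memblock_free(phys, size);
	memblock_remove(phys, size);

	ceu_dma_membase = phys;
}
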
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index b8bf67c86eab..1cf9a47ac90e 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -630,7 +630,10 @@ static void __init kfr2r09_mv_mem_reserve(void)
phys_addr_t phys;
phys_addr_t size = CEU_BUFFER_MEMORY_SIZE;
- phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+ phys = memblock_phys_alloc(size, PAGE_SIZE);
+ if (!phys)
+ panic("Failed to allocate CEU memory\n");
+
memblock_free(phys, size);
memblock_remove(phys, size);
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index 03579faa4ce3..90702740f207 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -630,7 +630,10 @@ static void __init migor_mv_mem_reserve(void)
phys_addr_t phys;
phys_addr_t size = CEU_BUFFER_MEMORY_SIZE;
- phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+ phys = memblock_phys_alloc(size, PAGE_SIZE);
+ if (!phys)
+ panic("Failed to allocate CEU memory\n");
+
memblock_free(phys, size);
memblock_remove(phys, size);
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index 13c2d3ce78f4..3674064816c7 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -963,12 +963,18 @@ static void __init ms7724se_mv_mem_reserve(void)
phys_addr_t phys;
phys_addr_t size = CEU_BUFFER_MEMORY_SIZE;
- phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+ phys = memblock_phys_alloc(size, PAGE_SIZE);
+ if (!phys)
+ panic("Failed to allocate CEU0 memory\n");
+
memblock_free(phys, size);
memblock_remove(phys, size);
ceu0_dma_membase = phys;
- phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE);
+ phys = memblock_phys_alloc(size, PAGE_SIZE);
+ if (!phys)
+ panic("Failed to allocate CEU1 memory\n");
+
memblock_free(phys, size);
memblock_remove(phys, size);
ceu1_dma_membase = phys;
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index b9f9f1a5afdc..63d63a36f6f2 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -168,7 +168,8 @@ void __init reserve_crashkernel(void)
crash_size = PAGE_ALIGN(resource_size(&crashk_res));
if (!crashk_res.start) {
unsigned long max = memblock_end_of_DRAM() - memory_limit;
- crashk_res.start = __memblock_alloc_base(crash_size, PAGE_SIZE, max);
+ crashk_res.start = memblock_phys_alloc_range(crash_size,
+ PAGE_SIZE, 0, max);
if (!crashk_res.start) {
pr_err("crashkernel allocation failed\n");
goto disable;
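
memblock_alloc_base() and __memblock_alloc_base() took only an upper limit and either panicked or returned 0; their replacement, memblock_phys_alloc_range(size, align, start, end), takes an explicit lower bound as well and always returns 0 on failure, leaving the error report to the caller. A minimal sketch of a bounded allocation built on it (alloc_below is an invented helper):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>

/* Invented helper: allocate 'size' bytes anywhere in [0, limit). */
static phys_addr_t __init alloc_below(phys_addr_t size, phys_addr_t limit)
{
	phys_addr_t base = memblock_phys_alloc_range(size, PAGE_SIZE, 0, limit);

	if (!base)
		panic("Failed to allocate %pa bytes below %pa\n",
		      &size, &limit);

	return base;
}
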
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index a0fa4de03dd5..70621324db41 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -128,6 +128,9 @@ static pmd_t * __init one_md_table_init(pud_t *pud)
pmd_t *pmd;
pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!pmd)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
pud_populate(&init_mm, pud, pmd);
BUG_ON(pmd != pmd_offset(pud, 0));
}
@@ -141,6 +144,9 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
pte_t *pte;
pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
pmd_populate_kernel(&init_mm, pmd, pte);
BUG_ON(pte != pte_offset_kernel(pmd, 0));
}
@@ -196,7 +202,7 @@ void __init allocate_pgdat(unsigned int nid)
get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
#ifdef CONFIG_NEED_MULTIPLE_NODES
- NODE_DATA(nid) = memblock_alloc_try_nid_nopanic(
+ NODE_DATA(nid) = memblock_alloc_try_nid(
sizeof(struct pglist_data),
SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
MEMBLOCK_ALLOC_ACCESSIBLE, nid);
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c
index c4bde6148810..f7e4439deb17 100644
--- a/arch/sh/mm/numa.c
+++ b/arch/sh/mm/numa.c
@@ -43,6 +43,10 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
/* Node-local pgdat */
NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data),
SMP_CACHE_BYTES, nid);
+ if (!NODE_DATA(nid))
+ panic("%s: Failed to allocate %zu bytes align=0x%x nid=%d\n",
+ __func__, sizeof(struct pglist_data), SMP_CACHE_BYTES,
+ nid);
NODE_DATA(nid)->node_start_pfn = start_pfn;
NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c
index 42d7f2a7da6d..869b16c96157 100644
--- a/arch/sparc/kernel/prom_32.c
+++ b/arch/sparc/kernel/prom_32.c
@@ -32,9 +32,9 @@ void * __init prom_early_alloc(unsigned long size)
{
void *ret;
- ret = memblock_alloc_from(size, SMP_CACHE_BYTES, 0UL);
- if (ret != NULL)
- memset(ret, 0, size);
+ ret = memblock_alloc(size, SMP_CACHE_BYTES);
+ if (!ret)
+ panic("%s: Failed to allocate %lu bytes\n", __func__, size);
prom_early_allocated += size;
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 51c4d12c0853..fd2182a5c32d 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -624,8 +624,14 @@ void __init alloc_irqstack_bootmem(void)
softirq_stack[i] = memblock_alloc_node(THREAD_SIZE,
THREAD_SIZE, node);
+ if (!softirq_stack[i])
+ panic("%s: Failed to allocate %lu bytes align=%lx nid=%d\n",
+ __func__, THREAD_SIZE, THREAD_SIZE, node);
hardirq_stack[i] = memblock_alloc_node(THREAD_SIZE,
THREAD_SIZE, node);
+ if (!hardirq_stack[i])
+ panic("%s: Failed to allocate %lu bytes align=%lx nid=%d\n",
+ __func__, THREAD_SIZE, THREAD_SIZE, node);
}
}
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index f45d876983f1..a8275fea4b70 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1628,6 +1628,8 @@ static void __init pcpu_populate_pte(unsigned long addr)
pud_t *new;
new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ if (!new)
+ goto err_alloc;
pgd_populate(&init_mm, pgd, new);
}
@@ -1636,6 +1638,8 @@ static void __init pcpu_populate_pte(unsigned long addr)
pmd_t *new;
new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ if (!new)
+ goto err_alloc;
pud_populate(&init_mm, pud, new);
}
@@ -1644,8 +1648,16 @@ static void __init pcpu_populate_pte(unsigned long addr)
pte_t *new;
new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ if (!new)
+ goto err_alloc;
pmd_populate_kernel(&init_mm, pmd, new);
}
+
+ return;
+
+err_alloc:
+ panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
}
void __init setup_per_cpu_areas(void)
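
Where one function repeats the same allocation at several page-table levels, the sparc changes route every failure to a single err_alloc label rather than duplicating the panic() at each level. A condensed sketch of that shape, with a three-slot array standing in for the pgd/pud/pmd entries of the real code:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>

static void __init example_populate_levels(void)
{
	void *entry[3];		/* stands in for the pgd/pud/pmd entries */
	int i;

	for (i = 0; i < 3; i++) {
		/* Same allocation at each level, one shared failure path. */
		entry[i] = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		if (!entry[i])
			goto err_alloc;
	}
	return;

err_alloc:
	panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
	      __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
}
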
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index d900952bfc5f..a8ff29821bdb 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -264,7 +264,7 @@ void __init mem_init(void)
i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
i += 1;
sparc_valid_addr_bitmap = (unsigned long *)
- memblock_alloc_from(i << 2, SMP_CACHE_BYTES, 0UL);
+ memblock_alloc(i << 2, SMP_CACHE_BYTES);
if (sparc_valid_addr_bitmap == NULL) {
prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index ef340e8f209f..f2d70ff7a284 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1809,6 +1809,8 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
PAGE_SIZE);
+ if (!new)
+ goto err_alloc;
alloc_bytes += PAGE_SIZE;
pgd_populate(&init_mm, pgd, new);
}
@@ -1822,6 +1824,8 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
}
new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
PAGE_SIZE);
+ if (!new)
+ goto err_alloc;
alloc_bytes += PAGE_SIZE;
pud_populate(&init_mm, pud, new);
}
@@ -1836,6 +1840,8 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
}
new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
PAGE_SIZE);
+ if (!new)
+ goto err_alloc;
alloc_bytes += PAGE_SIZE;
pmd_populate_kernel(&init_mm, pmd, new);
}
@@ -1855,6 +1861,11 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
}
return alloc_bytes;
+
+err_alloc:
+ panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ return -ENOMEM;
}
static void __init flush_all_kernel_tsbs(void)
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index b609362e846f..aaebbc00d262 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -303,13 +303,19 @@ static void __init srmmu_nocache_init(void)
bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;
- srmmu_nocache_pool = memblock_alloc_from(srmmu_nocache_size,
- SRMMU_NOCACHE_ALIGN_MAX, 0UL);
+ srmmu_nocache_pool = memblock_alloc(srmmu_nocache_size,
+ SRMMU_NOCACHE_ALIGN_MAX);
+ if (!srmmu_nocache_pool)
+ panic("%s: Failed to allocate %lu bytes align=0x%x\n",
+ __func__, srmmu_nocache_size, SRMMU_NOCACHE_ALIGN_MAX);
memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
srmmu_nocache_bitmap =
- memblock_alloc_from(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
- SMP_CACHE_BYTES, 0UL);
+ memblock_alloc(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
+ SMP_CACHE_BYTES);
+ if (!srmmu_nocache_bitmap)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ BITS_TO_LONGS(bitmap_bits) * sizeof(long));
bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
@@ -467,7 +473,9 @@ static void __init sparc_context_init(int numctx)
unsigned long size;
size = numctx * sizeof(struct ctx_list);
- ctx_list_pool = memblock_alloc_from(size, SMP_CACHE_BYTES, 0UL);
+ ctx_list_pool = memblock_alloc(size, SMP_CACHE_BYTES);
+ if (!ctx_list_pool)
+ panic("%s: Failed to allocate %lu bytes\n", __func__, size);
for (ctx = 0; ctx < numctx; ctx++) {
struct ctx_list *clist;
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index d80cfb1d9430..6e5be5fb4143 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -649,6 +649,9 @@ static int __init eth_setup(char *str)
}
new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES);
+ if (!new)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(*new));
INIT_LIST_HEAD(&new->list);
new->index = n;
diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
index 046fa9ea0ccc..596e7056f376 100644
--- a/arch/um/drivers/vector_kern.c
+++ b/arch/um/drivers/vector_kern.c
@@ -1576,6 +1576,9 @@ static int __init vector_setup(char *str)
return 1;
}
new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES);
+ if (!new)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(*new));
INIT_LIST_HEAD(&new->list);
new->unit = n;
new->arguments = str;
diff --git a/arch/um/kernel/initrd.c b/arch/um/kernel/initrd.c
index ce169ea87e61..1dcd310cb34d 100644
--- a/arch/um/kernel/initrd.c
+++ b/arch/um/kernel/initrd.c
@@ -37,6 +37,8 @@ int __init read_initrd(void)
}
area = memblock_alloc(size, SMP_CACHE_BYTES);
+ if (!area)
+ panic("%s: Failed to allocate %llu bytes\n", __func__, size);
if (load_initrd(initrd, area, size) == -1)
return 0;
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 799b571a8f88..99aa11bf53d1 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -66,6 +66,10 @@ static void __init one_page_table_init(pmd_t *pmd)
if (pmd_none(*pmd)) {
pte_t *pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate %lu bytes align=%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
+
set_pmd(pmd, __pmd(_KERNPG_TABLE +
(unsigned long) __pa(pte)));
if (pte != pte_offset_kernel(pmd, 0))
@@ -77,6 +81,10 @@ static void __init one_md_table_init(pud_t *pud)
{
#ifdef CONFIG_3_LEVEL_PGTABLES
pmd_t *pmd_table = (pmd_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+ if (!pmd_table)
+ panic("%s: Failed to allocate %lu bytes align=%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
+
set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
if (pmd_table != pmd_offset(pud, 0))
BUG();
@@ -126,6 +134,10 @@ static void __init fixaddr_user_init( void)
fixrange_init( FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
v = (unsigned long) memblock_alloc_low(size, PAGE_SIZE);
+ if (!v)
+ panic("%s: Failed to allocate %lu bytes align=%lx\n",
+ __func__, size, PAGE_SIZE);
+
memcpy((void *) v , (void *) FIXADDR_USER_START, size);
p = __pa(v);
for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
@@ -146,6 +158,10 @@ void __init paging_init(void)
empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
PAGE_SIZE);
+ if (!empty_zero_page)
+ panic("%s: Failed to allocate %lu bytes align=%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
+
for (i = 0; i < ARRAY_SIZE(zones_size); i++)
zones_size[i] = 0;
diff --git a/arch/unicore32/kernel/setup.c b/arch/unicore32/kernel/setup.c
index 4b0cb68c355a..d3239cf2e837 100644
--- a/arch/unicore32/kernel/setup.c
+++ b/arch/unicore32/kernel/setup.c
@@ -207,6 +207,10 @@ request_standard_resources(struct meminfo *mi)
continue;
res = memblock_alloc_low(sizeof(*res), SMP_CACHE_BYTES);
+ if (!res)
+ panic("%s: Failed to allocate %zu bytes align=%x\n",
+ __func__, sizeof(*res), SMP_CACHE_BYTES);
+
res->name = "System RAM";
res->start = mi->bank[i].start;
res->end = mi->bank[i].start + mi->bank[i].size - 1;
diff --git a/arch/unicore32/mm/mmu.c b/arch/unicore32/mm/mmu.c
index a40219291965..aa2060beb408 100644
--- a/arch/unicore32/mm/mmu.c
+++ b/arch/unicore32/mm/mmu.c
@@ -145,8 +145,13 @@ static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr,
unsigned long prot)
{
if (pmd_none(*pmd)) {
- pte_t *pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t),
- PTRS_PER_PTE * sizeof(pte_t));
+ size_t size = PTRS_PER_PTE * sizeof(pte_t);
+ pte_t *pte = memblock_alloc(size, size);
+
+ if (!pte)
+ panic("%s: Failed to allocate %zu bytes align=%zx\n",
+ __func__, size, size);
+
__pmd_populate(pmd, __pa(pte) | prot);
}
BUG_ON(pmd_bad(*pmd));
@@ -349,6 +354,9 @@ static void __init devicemaps_init(void)
* Allocate the vector page early.
*/
vectors = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!vectors)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
pmd_clear(pmd_off_k(addr));
@@ -426,6 +434,9 @@ void __init paging_init(void)
/* allocate the zero page. */
zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!zero_page)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
bootmem_init();
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 2624de16cd7a..8dcbf6890714 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -935,6 +935,9 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
#define HPET_RESOURCE_NAME_SIZE 9
hpet_res = memblock_alloc(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE,
SMP_CACHE_BYTES);
+ if (!hpet_res)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE);
hpet_res->name = (void *)&hpet_res[1];
hpet_res->flags = IORESOURCE_MEM;
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 264e3221d923..53aa234a6803 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2581,6 +2581,8 @@ static struct resource * __init ioapic_setup_resources(void)
n *= nr_ioapics;
mem = memblock_alloc(n, SMP_CACHE_BYTES);
+ if (!mem)
+ panic("%s: Failed to allocate %lu bytes\n", __func__, n);
res = (void *)mem;
mem += sizeof(struct resource) * nr_ioapics;
@@ -2625,6 +2627,9 @@ fake_ioapic_page:
#endif
ioapic_phys = (unsigned long)memblock_alloc(PAGE_SIZE,
PAGE_SIZE);
+ if (!ioapic_phys)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
ioapic_phys = __pa(ioapic_phys);
}
set_fixmap_nocache(idx, ioapic_phys);
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 666e4af145a2..2879e234e193 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -776,7 +776,7 @@ u64 __init e820__memblock_alloc_reserved(u64 size, u64 align)
{
u64 addr;
- addr = __memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
+ addr = memblock_phys_alloc(size, align);
if (addr) {
e820__range_update_kexec(addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED);
pr_info("update e820_table_kexec for e820__memblock_alloc_reserved()\n");
@@ -1097,6 +1097,9 @@ void __init e820__reserve_resources(void)
res = memblock_alloc(sizeof(*res) * e820_table->nr_entries,
SMP_CACHE_BYTES);
+ if (!res)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(*res) * e820_table->nr_entries);
e820_res = res;
for (i = 0; i < e820_table->nr_entries; i++) {
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 13af08827eef..4bf46575568a 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -106,22 +106,22 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
void *ptr;
if (!node_online(node) || !NODE_DATA(node)) {
- ptr = memblock_alloc_from_nopanic(size, align, goal);
+ ptr = memblock_alloc_from(size, align, goal);
pr_info("cpu %d has no node %d or node-local memory\n",
cpu, node);
pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
cpu, size, __pa(ptr));
} else {
- ptr = memblock_alloc_try_nid_nopanic(size, align, goal,
- MEMBLOCK_ALLOC_ACCESSIBLE,
- node);
+ ptr = memblock_alloc_try_nid(size, align, goal,
+ MEMBLOCK_ALLOC_ACCESSIBLE,
+ node);
pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
cpu, size, node, __pa(ptr));
}
return ptr;
#else
- return memblock_alloc_from_nopanic(size, align, goal);
+ return memblock_alloc_from(size, align, goal);
#endif
}
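
With the _nopanic variants gone, memblock_alloc_from() and memblock_alloc_try_nid() themselves return NULL instead of panicking, so call sites that can tolerate a failure just switch names and keep whatever fallback they already had, while fatal call sites add their own panic(). A small sketch of the tolerant case (alloc_node_or_warn is an invented helper):

#include <linux/cache.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/printk.h>

/* Invented helper: a failure here is reported but not fatal. */
static void * __init alloc_node_or_warn(size_t size, int nid)
{
	void *ptr = memblock_alloc_try_nid(size, SMP_CACHE_BYTES,
					   MEMBLOCK_LOW_LIMIT,
					   MEMBLOCK_ALLOC_ACCESSIBLE, nid);

	if (!ptr)
		pr_warn("node %d: no early memory for %zu bytes\n", nid, size);

	return ptr;
}
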
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 462fde83b515..8dc0fc0b1382 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -24,14 +24,16 @@ extern struct range pfn_mapped[E820_MAX_ENTRIES];
static p4d_t tmp_p4d_table[MAX_PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
-static __init void *early_alloc(size_t size, int nid, bool panic)
+static __init void *early_alloc(size_t size, int nid, bool should_panic)
{
- if (panic)
- return memblock_alloc_try_nid(size, size,
- __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
- else
- return memblock_alloc_try_nid_nopanic(size, size,
+ void *ptr = memblock_alloc_try_nid(size, size,
__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+
+ if (!ptr && should_panic)
+ panic("%pS: Failed to allocate page, nid=%d from=%lx\n",
+ (void *)_RET_IP_, nid, __pa(MAX_DMA_ADDRESS));
+
+ return ptr;
}
static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 12c1b7a83ed7..dfb6c4df639a 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -195,15 +195,11 @@ static void __init alloc_node_data(int nid)
* Allocate node data. Try node-local memory and then any node.
* Never allocate in DMA zone.
*/
- nd_pa = memblock_phys_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
+ nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
if (!nd_pa) {
- nd_pa = __memblock_alloc_base(nd_size, SMP_CACHE_BYTES,
- MEMBLOCK_ALLOC_ACCESSIBLE);
- if (!nd_pa) {
- pr_err("Cannot find %zu bytes in any node (initial node: %d)\n",
- nd_size, nid);
- return;
- }
+ pr_err("Cannot find %zu bytes in any node (initial node: %d)\n",
+ nd_size, nid);
+ return;
}
nd = __va(nd_pa);
diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
index b4ab779f1d47..ac9e7bf49b66 100644
--- a/arch/x86/platform/olpc/olpc_dt.c
+++ b/arch/x86/platform/olpc/olpc_dt.c
@@ -141,6 +141,9 @@ void * __init prom_early_alloc(unsigned long size)
* wasted bootmem) and hand off chunks of it to callers.
*/
res = memblock_alloc(chunk_size, SMP_CACHE_BYTES);
+ if (!res)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ chunk_size);
BUG_ON(!res);
prom_early_allocated += chunk_size;
memset(res, 0, chunk_size);
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 055e37e43541..95ce9b5be411 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -181,8 +181,15 @@ static void p2m_init_identity(unsigned long *p2m, unsigned long pfn)
static void * __ref alloc_p2m_page(void)
{
- if (unlikely(!slab_is_available()))
- return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (unlikely(!slab_is_available())) {
+ void *ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+
+ if (!ptr)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE);
+
+ return ptr;
+ }
return (void *)__get_free_page(GFP_KERNEL);
}
diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c
index 1734cda6bc4a..af7152560bc3 100644
--- a/arch/xtensa/mm/kasan_init.c
+++ b/arch/xtensa/mm/kasan_init.c
@@ -45,6 +45,10 @@ static void __init populate(void *start, void *end)
pmd_t *pmd = pmd_offset(pgd, vaddr);
pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, n_pages * sizeof(pte_t), PAGE_SIZE);
+
pr_debug("%s: %p - %p\n", __func__, start, end);
for (i = j = 0; i < n_pmds; ++i) {
@@ -52,8 +56,10 @@ static void __init populate(void *start, void *end)
for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
phys_addr_t phys =
- memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
- MEMBLOCK_ALLOC_ANYWHERE);
+ memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
+
+ if (!phys)
+ panic("Failed to allocate page table page\n");
set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
}
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index a4dcfd39bc5c..2fb7d1172228 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -32,6 +32,9 @@ static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
__func__, vaddr, n_pages);
pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate %zu bytes align=%lx\n",
+ __func__, n_pages * sizeof(pte_t), PAGE_SIZE);
for (i = 0; i < n_pages; ++i)
pte_clear(NULL, 0, pte + i);