From 26a4676faa1ad5d99317e0cd701e5d6f3e716b77 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Wed, 7 Nov 2018 18:10:38 +0100
Subject: arm64: mm: define NET_IP_ALIGN to 0

On arm64, there is no need to add 2 bytes of padding to the start of
each network buffer just to make the IP header appear 32-bit aligned.

Since this might actually adversely affect DMA performance on some
platforms, let's override NET_IP_ALIGN to 0 to get rid of this
padding.

Acked-by: Ilias Apalodimas
Tested-by: Ilias Apalodimas
Acked-by: Mark Rutland
Acked-by: Will Deacon
Signed-off-by: Ard Biesheuvel
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/processor.h | 8 ++++++++
 1 file changed, 8 insertions(+)

(limited to 'arch')

diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 3e2091708b8e..6b0d4dff5012 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -24,6 +24,14 @@
 #define KERNEL_DS	UL(-1)
 #define USER_DS		(TASK_SIZE_64 - 1)
 
+/*
+ * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
+ * no point in shifting all network buffers by 2 bytes just to make some IP
+ * header fields appear aligned in memory, potentially sacrificing some DMA
+ * performance on some platforms.
+ */
+#define NET_IP_ALIGN	0
+
 #ifndef __ASSEMBLY__
 
 #ifdef __KERNEL__
--
cgit

From 24cc61d8cb5a9232fadf21a830061853c1268fdd Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Wed, 7 Nov 2018 15:16:06 +0100
Subject: arm64: memblock: don't permit memblock resizing until linear mapping
 is up

Bhupesh reports that having numerous memblock reservations at early
boot may result in the following crash:

  Unable to handle kernel paging request at virtual address ffff80003ffe0000
  ...
  Call trace:
   __memcpy+0x110/0x180
   memblock_add_range+0x134/0x2e8
   memblock_reserve+0x70/0xb8
   memblock_alloc_base_nid+0x6c/0x88
   __memblock_alloc_base+0x3c/0x4c
   memblock_alloc_base+0x28/0x4c
   memblock_alloc+0x2c/0x38
   early_pgtable_alloc+0x20/0xb0
   paging_init+0x28/0x7f8

This is caused by the fact that we permit memblock resizing before the
linear mapping is up, and so the memblock.reserved array is moved into
memory that is not mapped yet.

So let's ensure that this crash can no longer occur, by deferring the
call to memblock_allow_resize() until after the linear mapping has
been created.

Reported-by: Bhupesh Sharma
Acked-by: Will Deacon
Tested-by: Marc Zyngier
Signed-off-by: Ard Biesheuvel
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/init.c | 2 --
 arch/arm64/mm/mmu.c  | 2 ++
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch')

diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 9d9582cac6c4..9b432d9fcada 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -483,8 +483,6 @@ void __init arm64_memblock_init(void)
 	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
 
 	dma_contiguous_reserve(arm64_dma_phys_limit);
-
-	memblock_allow_resize();
 }
 
 void __init bootmem_init(void)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 394b8d554def..d1d6601b385d 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -659,6 +659,8 @@ void __init paging_init(void)
 
 	memblock_free(__pa_symbol(init_pg_dir),
 		      __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));
+
+	memblock_allow_resize();
 }
 
 /*
--
cgit
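
As background for the first patch: NET_IP_ALIGN is consumed by network
drivers (and by helpers such as netdev_alloc_skb_ip_align()) when carving
out receive buffers. Below is a minimal sketch of that conventional RX
pattern, not code from the patch; the example_rx_alloc() helper is
hypothetical. With NET_IP_ALIGN at 2, skb_reserve() shifts skb->data so
the IP header lands on a 4-byte boundary after the 14-byte Ethernet
header; with the arm64 definition of 0, the reserve becomes a no-op and
the buffer keeps its natural alignment for DMA.

  #include <linux/netdevice.h>
  #include <linux/skbuff.h>

  /* hypothetical helper, for illustration only */
  static struct sk_buff *example_rx_alloc(struct net_device *dev,
                                          unsigned int len)
  {
          struct sk_buff *skb;

          /* leave room for the optional alignment shift */
          skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
          if (!skb)
                  return NULL;

          /* 2 bytes on most architectures; a no-op (0) on arm64 */
          skb_reserve(skb, NET_IP_ALIGN);
          return skb;
  }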
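
The second patch relies on the order of the early boot calls made from
setup_arch(). A condensed sketch of that assumed ordering (based on
arch/arm64/kernel/setup.c of this era, not part of the diff) shows why
moving memblock_allow_resize() into paging_init() suffices: memblock may
only reallocate, and memcpy, its arrays once the linear mapping that
would cover their new location exists.

  /* condensed sketch of the assumed call ordering, not part of the patch */
  void __init setup_arch(char **cmdline_p)
  {
          /* ... early fixmap setup, FDT parsing, etc. ... */
          arm64_memblock_init();  /* no longer calls memblock_allow_resize() */
          paging_init();          /* creates the linear map, then allows resizing */
          /* ... */
  }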