Diffstat (limited to 'arch/parisc/mm/init.c'):
-rw-r--r--  arch/parisc/mm/init.c  211
 1 file changed, 173 insertions(+), 38 deletions(-)
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 591a4e939415..14270715d754 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -24,6 +24,7 @@
#include <linux/nodemask.h> /* for node_online_map */
#include <linux/pagemap.h> /* for release_pages */
#include <linux/compat.h>
+#include <linux/execmem.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
@@ -32,6 +33,8 @@
#include <asm/sections.h>
#include <asm/msgbuf.h>
#include <asm/sparsemem.h>
+#include <asm/asm-offsets.h>
+#include <asm/shmbuf.h>
extern int data_start;
extern void parisc_kernel_start(void); /* Kernel entry point in head.S */
@@ -127,16 +130,12 @@ static void __init setup_bootmem(void)
int j;
for (j = i; j > 0; j--) {
- physmem_range_t tmp;
-
if (pmem_ranges[j-1].start_pfn <
pmem_ranges[j].start_pfn) {
break;
}
- tmp = pmem_ranges[j-1];
- pmem_ranges[j-1] = pmem_ranges[j];
- pmem_ranges[j] = tmp;
+ swap(pmem_ranges[j-1], pmem_ranges[j]);
}
}
@@ -341,9 +340,9 @@ static void __init setup_bootmem(void)
static bool kernel_set_to_readonly;
-static void __init map_pages(unsigned long start_vaddr,
- unsigned long start_paddr, unsigned long size,
- pgprot_t pgprot, int force)
+static void __ref map_pages(unsigned long start_vaddr,
+ unsigned long start_paddr, unsigned long size,
+ pgprot_t pgprot, int force)
{
pmd_t *pmd;
pte_t *pg_table;
@@ -378,10 +377,8 @@ static void __init map_pages(unsigned long start_vaddr,
#if CONFIG_PGTABLE_LEVELS == 3
if (pud_none(*pud)) {
- pmd = memblock_alloc(PAGE_SIZE << PMD_ORDER,
- PAGE_SIZE << PMD_ORDER);
- if (!pmd)
- panic("pmd allocation failed.\n");
+ pmd = memblock_alloc_or_panic(PAGE_SIZE << PMD_TABLE_ORDER,
+ PAGE_SIZE << PMD_TABLE_ORDER);
pud_populate(NULL, pud, pmd);
}
#endif
@@ -389,9 +386,7 @@ static void __init map_pages(unsigned long start_vaddr,
pmd = pmd_offset(pud, vaddr);
for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
if (pmd_none(*pmd)) {
- pg_table = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!pg_table)
- panic("page table allocation failed\n");
+ pg_table = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
pmd_populate_kernel(NULL, pmd, pg_table);
}
@@ -453,21 +448,19 @@ void __init set_kernel_text_rw(int enable_read_write)
flush_tlb_all();
}
-void __ref free_initmem(void)
+void free_initmem(void)
{
unsigned long init_begin = (unsigned long)__init_begin;
unsigned long init_end = (unsigned long)__init_end;
unsigned long kernel_end = (unsigned long)&_end;
/* Remap kernel text and data, but do not touch init section yet. */
- kernel_set_to_readonly = true;
map_pages(init_end, __pa(init_end), kernel_end - init_end,
PAGE_KERNEL, 0);
/* The init text pages are marked R-X. We have to
* flush the icache and mark them RW-
*
- * This is tricky, because map_pages is in the init section.
* Do a dummy remap of the data section first (the data
* section is already PAGE_KERNEL) to pull in the TLB entries
* for map_kernel */
@@ -484,7 +477,7 @@ void __ref free_initmem(void)
/* finally dump all the instructions which were cached, since the
* pages are no-longer executable */
flush_icache_range(init_begin, init_end);
-
+
free_initmem_default(POISON_FREE_INITMEM);
/* set up a new led state on systems shipped LED State panel */
@@ -495,11 +488,18 @@ void __ref free_initmem(void)
#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
- /* rodata memory was already mapped with KERNEL_RO access rights by
- pagetable_init() and map_pages(). No need to do additional stuff here */
- unsigned long roai_size = __end_ro_after_init - __start_ro_after_init;
+ unsigned long start = (unsigned long) &__start_rodata;
+ unsigned long end = (unsigned long) &__end_rodata;
+
+ pr_info("Write protecting the kernel read-only data: %luk\n",
+ (end - start) >> 10);
+
+ kernel_set_to_readonly = true;
+ map_pages(start, __pa(start), end - start, PAGE_KERNEL, 0);
- pr_info("Write protected read-only-after-init data: %luk\n", roai_size >> 10);
+ /* force the kernel to see the new page table entries */
+ flush_cache_all();
+ flush_tlb_all();
}
#endif
@@ -528,10 +528,6 @@ void mark_rodata_ro(void)
void *parisc_vmalloc_start __ro_after_init;
EXPORT_SYMBOL(parisc_vmalloc_start);
-#ifdef CONFIG_PA11
-unsigned long pcxl_dma_start __ro_after_init;
-#endif
-
void __init mem_init(void)
{
/* Do sanity checks on IPC (compat) structures */
@@ -560,9 +556,11 @@ void __init mem_init(void)
BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PGD);
#endif
- high_memory = __va((max_pfn << PAGE_SHIFT));
- set_max_mapnr(max_low_pfn);
- memblock_free_all();
+#ifdef CONFIG_64BIT
+ /* avoid ldil_%L() asm statements to sign-extend into upper 32-bits */
+ BUILD_BUG_ON(__PAGE_OFFSET >= 0x80000000);
+ BUILD_BUG_ON(TMPALIAS_MAP_START >= 0x80000000);
+#endif
#ifdef CONFIG_PA11
if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
@@ -625,12 +623,10 @@ static void __init pagetable_init(void)
for (range = 0; range < npmem_ranges; range++) {
unsigned long start_paddr;
- unsigned long end_paddr;
unsigned long size;
start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
size = pmem_ranges[range].pages << PAGE_SHIFT;
- end_paddr = start_paddr + size;
map_pages((unsigned long)__va(start_paddr), start_paddr,
size, PAGE_KERNEL, 0);
@@ -644,9 +640,7 @@ static void __init pagetable_init(void)
}
#endif
- empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!empty_zero_page)
- panic("zero page allocation failed.\n");
+ empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
}
@@ -670,6 +664,35 @@ static void __init gateway_init(void)
PAGE_SIZE, PAGE_GATEWAY, 1);
}
+static void __init fixmap_init(void)
+{
+ unsigned long addr = FIXMAP_START;
+ unsigned long end = FIXMAP_START + FIXMAP_SIZE;
+ pgd_t *pgd = pgd_offset_k(addr);
+ p4d_t *p4d = p4d_offset(pgd, addr);
+ pud_t *pud = pud_offset(p4d, addr);
+ pmd_t *pmd;
+
+ BUILD_BUG_ON(FIXMAP_SIZE > PMD_SIZE);
+
+#if CONFIG_PGTABLE_LEVELS == 3
+ if (pud_none(*pud)) {
+ pmd = memblock_alloc_or_panic(PAGE_SIZE << PMD_TABLE_ORDER,
+ PAGE_SIZE << PMD_TABLE_ORDER);
+ pud_populate(NULL, pud, pmd);
+ }
+#endif
+
+ pmd = pmd_offset(pud, addr);
+ do {
+ pte_t *pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
+
+ pmd_populate_kernel(&init_mm, pmd, pte);
+
+ addr += PAGE_SIZE;
+ } while (addr < end);
+}
+
static void __init parisc_bootmem_free(void)
{
unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
@@ -684,6 +707,7 @@ void __init paging_init(void)
setup_bootmem();
pagetable_init();
gateway_init();
+ fixmap_init();
flush_cache_all_local(); /* start with known state */
flush_tlb_all_local(NULL);
@@ -691,6 +715,77 @@ void __init paging_init(void)
parisc_bootmem_free();
}
+static void alloc_btlb(unsigned long start, unsigned long end, int *slot,
+ unsigned long entry_info)
+{
+ const int slot_max = btlb_info.fixed_range_info.num_comb;
+ int min_num_pages = btlb_info.min_size;
+ unsigned long size;
+
+ /* map at minimum 4 pages */
+ if (min_num_pages < 4)
+ min_num_pages = 4;
+
+ size = HUGEPAGE_SIZE;
+ while (start < end && *slot < slot_max && size >= PAGE_SIZE) {
+ /* starting address must have same alignment as size! */
+ /* if correctly aligned and fits in double size, increase */
+ if (((start & (2 * size - 1)) == 0) &&
+ (end - start) >= (2 * size)) {
+ size <<= 1;
+ continue;
+ }
+ /* if current size alignment is too big, try smaller size */
+ if ((start & (size - 1)) != 0) {
+ size >>= 1;
+ continue;
+ }
+ if ((end - start) >= size) {
+ if ((size >> PAGE_SHIFT) >= min_num_pages)
+ pdc_btlb_insert(start >> PAGE_SHIFT, __pa(start) >> PAGE_SHIFT,
+ size >> PAGE_SHIFT, entry_info, *slot);
+ (*slot)++;
+ start += size;
+ continue;
+ }
+ size /= 2;
+ continue;
+ }
+}
+
+void btlb_init_per_cpu(void)
+{
+ unsigned long s, t, e;
+ int slot;
+
+ /* BTLBs are not available on 64-bit CPUs */
+ if (IS_ENABLED(CONFIG_PA20))
+ return;
+ else if (pdc_btlb_info(&btlb_info) < 0) {
+ memset(&btlb_info, 0, sizeof btlb_info);
+ }
+
+ /* insert BTLBs for code and data segments */
+ s = (uintptr_t) dereference_function_descriptor(&_stext);
+ e = (uintptr_t) dereference_function_descriptor(&_etext);
+ t = (uintptr_t) dereference_function_descriptor(&_sdata);
+ BUG_ON(t != e);
+
+ /* code segments */
+ slot = 0;
+ alloc_btlb(s, e, &slot, 0x13800000);
+
+ /* sanity check */
+ t = (uintptr_t) dereference_function_descriptor(&_edata);
+ e = (uintptr_t) dereference_function_descriptor(&__bss_start);
+ BUG_ON(t != e);
+
+ /* data segments */
+ s = (uintptr_t) dereference_function_descriptor(&_sdata);
+ e = (uintptr_t) dereference_function_descriptor(&__bss_stop);
+ alloc_btlb(s, e, &slot, 0x11800000);
+}
+
#ifdef CONFIG_PA20
/*
@@ -721,7 +816,7 @@ static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
-static unsigned long dirty_space_ids = 0;
+static unsigned long dirty_space_ids;
static DEFINE_SPINLOCK(sid_lock);
@@ -842,9 +937,9 @@ void flush_tlb_all(void)
{
int do_recycle;
- __inc_irq_stat(irq_tlb_count);
do_recycle = 0;
spin_lock(&sid_lock);
+ __inc_irq_stat(irq_tlb_count);
if (dirty_space_ids > RECYCLE_THRESHOLD) {
BUG_ON(recycle_inuse); /* FIXME: Use a semaphore/wait queue here */
get_dirty_sids(&recycle_ndirty,recycle_dirty_array);
@@ -863,10 +958,50 @@ void flush_tlb_all(void)
#else
void flush_tlb_all(void)
{
- __inc_irq_stat(irq_tlb_count);
spin_lock(&sid_lock);
+ __inc_irq_stat(irq_tlb_count);
flush_tlb_all_local(NULL);
recycle_sids();
spin_unlock(&sid_lock);
}
#endif
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_NONE,
+ [VM_WRITE | VM_READ] = PAGE_READONLY,
+ [VM_EXEC] = PAGE_EXECREAD,
+ [VM_EXEC | VM_READ] = PAGE_EXECREAD,
+ [VM_EXEC | VM_WRITE] = PAGE_EXECREAD,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_EXECREAD,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_WRITEONLY,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = PAGE_EXECREAD,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_EXECREAD,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_RWX,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_RWX
+};
+DECLARE_VM_GET_PAGE_PROT
+
+#ifdef CONFIG_EXECMEM
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+ execmem_info = (struct execmem_info){
+ .ranges = {
+ [EXECMEM_DEFAULT] = {
+ .start = VMALLOC_START,
+ .end = VMALLOC_END,
+ .pgprot = PAGE_KERNEL_RWX,
+ .alignment = 1,
+ },
+ },
+ };
+
+ return &execmem_info;
+}
+#endif /* CONFIG_EXECMEM */
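
Note on the alloc_btlb() hunk above: the following is a standalone, user-space sketch of the same size-doubling/halving walk, useful for seeing which block sizes the loop would pick for a given segment. It is not part of the patch; PAGE_SHIFT_SIM, HUGEPAGE_SIZE_SIM, walk_btlb() and the printf() call are hypothetical stand-ins for the kernel's PAGE_SHIFT, HUGEPAGE_SIZE, alloc_btlb() and pdc_btlb_insert(), and the example addresses are made up.

/* sketch only: mimics the block-TLB sizing walk from alloc_btlb() */
#include <stdio.h>

#define PAGE_SHIFT_SIM     12UL                    /* assume 4 KiB pages */
#define PAGE_SIZE_SIM      (1UL << PAGE_SHIFT_SIM)
#define HUGEPAGE_SIZE_SIM  (1UL << 22)             /* assume 4 MiB huge pages */

static void walk_btlb(unsigned long start, unsigned long end, int *slot,
		      int slot_max, unsigned long min_num_pages)
{
	unsigned long size = HUGEPAGE_SIZE_SIM;

	while (start < end && *slot < slot_max && size >= PAGE_SIZE_SIM) {
		/* grow while start stays aligned and the range fits twice over */
		if ((start & (2 * size - 1)) == 0 && end - start >= 2 * size) {
			size <<= 1;
			continue;
		}
		/* shrink until start is aligned to the current block size */
		if (start & (size - 1)) {
			size >>= 1;
			continue;
		}
		if (end - start >= size) {
			/* stand-in for pdc_btlb_insert(); tiny blocks are skipped
			 * but still consume a slot, as in the patch */
			if ((size >> PAGE_SHIFT_SIM) >= min_num_pages)
				printf("slot %d: map 0x%lx + 0x%lx\n",
				       *slot, start, size);
			(*slot)++;
			start += size;
			continue;
		}
		size >>= 1;
	}
}

int main(void)
{
	int slot = 0;

	/* e.g. a text segment from 0x11000 to 0x3f8000, 8 slots, min 4 pages */
	walk_btlb(0x11000, 0x3f8000, &slot, 8, 4);
	return 0;
}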