From d460bb6c6417588dd8b0907d34f69b237918812a Mon Sep 17 00:00:00 2001
From: Niklas Schnelle
Date: Fri, 19 Feb 2021 12:00:52 +0100
Subject: s390: enable HAVE_IOREMAP_PROT

In commit b02002cc4c0f ("s390/pci: Implement ioremap_wc/prot() with MIO")
we implemented both ioremap_wc() and ioremap_prot(); however, until now we
had not set HAVE_IOREMAP_PROT in Kconfig. Do so now.

This also requires implementing pte_pgprot(), as it is used in the
generic_access_phys() code enabled by CONFIG_HAVE_IOREMAP_PROT. As with
ioremap_wc(), we need to take the MMIO Write Back bit index into account.

Moreover, since the pgprot value returned from pte_pgprot() is to be used
for mappings into the kernel address space, we must make sure that it uses
appropriate kernel page table protection bits. In particular, a pgprot
value originally coming from userspace could have the _PAGE_PROTECT bit
set to enable fault-based dirty bit accounting, which would then make the
mapping inaccessible when used in the kernel address space.

Fixes: b02002cc4c0f ("s390/pci: Implement ioremap_wc/prot() with MIO")
Reviewed-by: Gerald Schaefer
Signed-off-by: Niklas Schnelle
Signed-off-by: Vasily Gorbik
---
 arch/s390/include/asm/pgtable.h | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

(limited to 'arch/s390/include/asm/pgtable.h')

diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 29c7ecd5ad1d..9512f6820ead 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -865,6 +865,25 @@ static inline int pte_unused(pte_t pte)
 	return pte_val(pte) & _PAGE_UNUSED;
 }
 
+/*
+ * Extract the pgprot value from the given pte while at the same time making it
+ * usable for kernel address space mappings where fault driven dirty and
+ * young/old accounting is not supported, i.e _PAGE_PROTECT and _PAGE_INVALID
+ * must not be set.
+ */
+static inline pgprot_t pte_pgprot(pte_t pte)
+{
+	unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;
+
+	if (pte_write(pte))
+		pte_flags |= pgprot_val(PAGE_KERNEL);
+	else
+		pte_flags |= pgprot_val(PAGE_KERNEL_RO);
+	pte_flags |= pte_val(pte) & mio_wb_bit_mask;
+
+	return __pgprot(pte_flags);
+}
+
 /*
  * pgd/pmd/pte modification functions
  */
--
cgit
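
For context, it helps to see the consumer of the new helper. The snippet
below is a condensed paraphrase of what generic_access_phys() in
mm/memory.c does once it has looked up the pte under the page table lock;
locking, retry logic and sanity checks are trimmed, and
access_phys_sketch() is an illustrative name, not an upstream function. It
shows why pte_pgprot() must return kernel-usable protection bits: the
pgprot of the user mapping is fed straight into ioremap_prot() to create a
temporary kernel mapping.

	/*
	 * Sketch of the generic_access_phys() core: access a physical page
	 * behind a VM_IO/VM_PFNMAP mapping (e.g. on behalf of ptrace()).
	 * The pgprot extracted from the user pte is reused for a temporary
	 * kernel mapping, which is why pte_pgprot() on s390 must strip
	 * user-only bits such as _PAGE_PROTECT.
	 */
	static int access_phys_sketch(pte_t pte, unsigned long addr,
				      void *buf, int len, bool write)
	{
		int offset = offset_in_page(addr);
		unsigned long prot = pgprot_val(pte_pgprot(pte));
		resource_size_t phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
		void __iomem *maddr;

		/* only works when the arch provides ioremap_prot(), i.e.
		 * with CONFIG_HAVE_IOREMAP_PROT set - the point of this
		 * commit */
		maddr = ioremap_prot(phys, PAGE_ALIGN(len + offset), prot);
		if (!maddr)
			return -ENOMEM;
		if (write)
			memcpy_toio(maddr + offset, buf, len);
		else
			memcpy_fromio(buf, maddr + offset, len);
		iounmap(maddr);
		return len;
	}
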
From 0c4f2623b95779fe8cfb277fa255e4b91c0f96f0 Mon Sep 17 00:00:00 2001
From: Vasily Gorbik
Date: Tue, 6 Oct 2020 22:12:39 +0200
Subject: s390: setup kernel memory layout early

Currently there are two separate places where the kernel memory layout
has to be known and adjusted:

1. early kasan setup.
2. paging setup later.

Those two places had to be kept in sync and adjusted to reflect the
peculiar technical details of one another. With additional factors that
influence the kernel memory layout, like the ultravisor secure storage
limit, the complexity of keeping the two in sync grew even more.

Besides that, if we look forward towards creating the identity mapping
and enabling DAT before jumping into the uncompressed kernel - that would
also require full knowledge of and control over the kernel memory layout.

So, de-duplicate and move the kernel memory layout setup logic into the
decompressor.

Reviewed-by: Alexander Gordeev
Signed-off-by: Vasily Gorbik
---
 arch/s390/include/asm/pgtable.h | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

(limited to 'arch/s390/include/asm/pgtable.h')

diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 9512f6820ead..5677be473261 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -17,6 +17,7 @@
 #include <linux/page-flags.h>
 #include <linux/radix-tree.h>
 #include <linux/atomic.h>
+#include <asm/sections.h>
 #include <asm/bug.h>
 #include <asm/page.h>
 #include <asm/uaccess.h>
@@ -86,16 +87,16 @@ extern unsigned long zero_page_mask;
  * happen without trampolines and in addition the placement within a
  * 2GB frame is branch prediction unit friendly.
  */
-extern unsigned long VMALLOC_START;
-extern unsigned long VMALLOC_END;
+extern unsigned long __bootdata_preserved(VMALLOC_START);
+extern unsigned long __bootdata_preserved(VMALLOC_END);
 #define VMALLOC_DEFAULT_SIZE	((512UL << 30) - MODULES_LEN)
-extern struct page *vmemmap;
-extern unsigned long vmemmap_size;
+extern struct page *__bootdata_preserved(vmemmap);
+extern unsigned long __bootdata_preserved(vmemmap_size);
 
 #define VMEM_MAX_PHYS	((unsigned long) vmemmap)
 
-extern unsigned long MODULES_VADDR;
-extern unsigned long MODULES_END;
+extern unsigned long __bootdata_preserved(MODULES_VADDR);
+extern unsigned long __bootdata_preserved(MODULES_END);
 #define MODULES_VADDR	MODULES_VADDR
 #define MODULES_END	MODULES_END
 #define MODULES_LEN	(1UL << 31)
--
cgit
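
The __bootdata_preserved() annotation is what lets the decompressor own
the layout decision. The sketch below shows the mechanism: the macro shape
follows arch/s390/include/asm/sections.h, while the setup function body is
purely illustrative (the real logic also accounts for kasan shadow,
vmemmap and the ultravisor secure storage limit). Each annotated variable
lands in a dedicated section that both the decompressor and the kernel
image link against, so a value computed before decompression is already
valid when the uncompressed kernel starts.

	/*
	 * Shape of the macro in arch/s390/include/asm/sections.h: each
	 * variable gets its own .boot.preserved.data.<name> section, which
	 * (unlike .init data) is preserved for use in the decompressed
	 * kernel.
	 */
	#define __bootdata_preserved(var) \
		__section(".boot.preserved.data." #var) var

	/* Decompressor side (illustrative): define the variables - the
	 * kernel sees them through the externs in pgtable.h - and fill
	 * them in once, before jumping into the uncompressed kernel.
	 */
	unsigned long __bootdata_preserved(VMALLOC_START);
	unsigned long __bootdata_preserved(VMALLOC_END);

	static void setup_memory_layout_sketch(unsigned long asce_limit)
	{
		/* illustrative only: carve vmalloc out of the top of the
		 * address space */
		VMALLOC_END = asce_limit;
		VMALLOC_START = VMALLOC_END - VMALLOC_DEFAULT_SIZE;
	}
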
From 53c1c2504b6b35871b20c832be96163c846f3517 Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Mon, 14 Jun 2021 19:38:07 +0200
Subject: s390/pgtable: use register pair instead of register asm

Signed-off-by: Heiko Carstens
Signed-off-by: Vasily Gorbik
---
 arch/s390/include/asm/pgtable.h | 30 +++++++++++++-----------------
 1 file changed, 13 insertions(+), 17 deletions(-)

(limited to 'arch/s390/include/asm/pgtable.h')

diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 5677be473261..90e45ac5516f 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -558,27 +558,25 @@ static inline int mm_uses_skeys(struct mm_struct *mm)
 
 static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
 {
-	register unsigned long reg2 asm("2") = old;
-	register unsigned long reg3 asm("3") = new;
+	union register_pair r1 = { .even = old, .odd = new, };
 	unsigned long address = (unsigned long)ptr | 1;
 
 	asm volatile(
-		"	csp	%0,%3"
-		: "+d" (reg2), "+m" (*ptr)
-		: "d" (reg3), "d" (address)
+		"	csp	%[r1],%[address]"
+		: [r1] "+&d" (r1.pair), "+m" (*ptr)
+		: [address] "d" (address)
 		: "cc");
 }
 
 static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
 {
-	register unsigned long reg2 asm("2") = old;
-	register unsigned long reg3 asm("3") = new;
+	union register_pair r1 = { .even = old, .odd = new, };
 	unsigned long address = (unsigned long)ptr | 1;
 
 	asm volatile(
-		"	.insn	rre,0xb98a0000,%0,%3"
-		: "+d" (reg2), "+m" (*ptr)
-		: "d" (reg3), "d" (address)
+		"	.insn	rre,0xb98a0000,%[r1],%[address]"
+		: [r1] "+&d" (r1.pair), "+m" (*ptr)
+		: [address] "d" (address)
 		: "cc");
 }
 
@@ -592,14 +590,12 @@ static inline void crdte(unsigned long old, unsigned long new,
 			 unsigned long table, unsigned long dtt,
 			 unsigned long address, unsigned long asce)
 {
-	register unsigned long reg2 asm("2") = old;
-	register unsigned long reg3 asm("3") = new;
-	register unsigned long reg4 asm("4") = table | dtt;
-	register unsigned long reg5 asm("5") = address;
+	union register_pair r1 = { .even = old, .odd = new, };
+	union register_pair r2 = { .even = table | dtt, .odd = address, };
 
-	asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
-		     : "+d" (reg2)
-		     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
+	asm volatile(".insn rrf,0xb98f0000,%[r1],%[r2],%[asce],0"
+		     : [r1] "+&d" (r1.pair)
+		     : [r2] "d" (r2.pair), [asce] "a" (asce)
 		     : "memory", "cc");
 }
 
--
cgit
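
The union register_pair type that replaces the register asm idiom is
defined in arch/s390/include/asm/types.h; the definition below matches the
upstream header as of this series (the commentary is added here). Binding
the 128-bit .pair member to a "d" constraint makes the compiler allocate
an even/odd pair of adjacent 64-bit registers, which instructions such as
CSP, CSPG and CRDTE require, without pinning specific register numbers the
way register asm("2") did. The "&" early-clobber marks the pair as written
before all inputs are consumed, so it cannot overlap another operand.

	/*
	 * One 128-bit value overlaid on two 64-bit halves. Writing
	 * .even/.odd fills the halves; handing .pair to an inline asm "d"
	 * constraint yields an even/odd general register pair, as required
	 * by s390 instructions that take paired operands.
	 */
	union register_pair {
		unsigned __int128 pair;
		struct {
			unsigned long even;
			unsigned long odd;
		};
	};

This also interacts better with the optimizer: a register asm variable can
be silently clobbered between its initialization and the asm statement,
whereas a register pair operand is an ordinary value the compiler tracks
like any other.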