Diffstat (limited to 'arch/s390/include/asm/pgtable.h')
-rw-r--r--  arch/s390/include/asm/pgtable.h | 62
1 file changed, 39 insertions(+), 23 deletions(-)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 1f8f5da53262..dcac7b2df72c 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -17,6 +17,7 @@
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
+#include <asm/sections.h>
#include <asm/bug.h>
#include <asm/page.h>
#include <asm/uv.h>
@@ -84,16 +85,16 @@ extern unsigned long zero_page_mask;
* happen without trampolines and in addition the placement within a
* 2GB frame is branch prediction unit friendly.
*/
-extern unsigned long VMALLOC_START;
-extern unsigned long VMALLOC_END;
+extern unsigned long __bootdata_preserved(VMALLOC_START);
+extern unsigned long __bootdata_preserved(VMALLOC_END);
#define VMALLOC_DEFAULT_SIZE ((512UL << 30) - MODULES_LEN)
-extern struct page *vmemmap;
-extern unsigned long vmemmap_size;
+extern struct page *__bootdata_preserved(vmemmap);
+extern unsigned long __bootdata_preserved(vmemmap_size);
#define VMEM_MAX_PHYS ((unsigned long) vmemmap)
-extern unsigned long MODULES_VADDR;
-extern unsigned long MODULES_END;
+extern unsigned long __bootdata_preserved(MODULES_VADDR);
+extern unsigned long __bootdata_preserved(MODULES_END);
#define MODULES_VADDR MODULES_VADDR
#define MODULES_END MODULES_END
#define MODULES_LEN (1UL << 31)
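
Note on the hunk above: __bootdata_preserved() comes from <asm/sections.h> (hence the new include in the first hunk) and places each variable in its own ".boot.preserved.data" subsection, so a value assigned by the boot/decompressor stage remains visible to the decompressed kernel. A paraphrased sketch of the annotation and how declaration and definition pair up; the macro body shown here is an approximation, not part of this patch:

/* Each variable gets a dedicated subsection of .boot.preserved.data; unlike
 * .init data, the section survives into the decompressed kernel.
 */
#define __bootdata_preserved(var) __section(".boot.preserved.data." #var) var

/* A declaration/definition pair then reads like any other variable: */
extern unsigned long __bootdata_preserved(VMALLOC_START);  /* in this header */
unsigned long __bootdata_preserved(VMALLOC_START);         /* in one .c file */
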
@@ -553,27 +554,25 @@ static inline int mm_uses_skeys(struct mm_struct *mm)
static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
- register unsigned long reg2 asm("2") = old;
- register unsigned long reg3 asm("3") = new;
+ union register_pair r1 = { .even = old, .odd = new, };
unsigned long address = (unsigned long)ptr | 1;
asm volatile(
- " csp %0,%3"
- : "+d" (reg2), "+m" (*ptr)
- : "d" (reg3), "d" (address)
+ " csp %[r1],%[address]"
+ : [r1] "+&d" (r1.pair), "+m" (*ptr)
+ : [address] "d" (address)
: "cc");
}
static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
- register unsigned long reg2 asm("2") = old;
- register unsigned long reg3 asm("3") = new;
+ union register_pair r1 = { .even = old, .odd = new, };
unsigned long address = (unsigned long)ptr | 1;
asm volatile(
- " .insn rre,0xb98a0000,%0,%3"
- : "+d" (reg2), "+m" (*ptr)
- : "d" (reg3), "d" (address)
+ " .insn rre,0xb98a0000,%[r1],%[address]"
+ : [r1] "+&d" (r1.pair), "+m" (*ptr)
+ : [address] "d" (address)
: "cc");
}
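
Note on the csp()/cspg() hunk: CSP and CSPG take the old/new operands in an even/odd register pair, which the removed code pinned by hand with register ... asm("2")/asm("3") variables. union register_pair (from <asm/types.h>) expresses the same constraint through the type system: the 128-bit .pair member makes the compiler allocate an even/odd GPR pair itself, and the "+&d" early clobber keeps that pair from overlapping the address input. Roughly, and paraphrased rather than quoted from this patch:

union register_pair {
	unsigned __int128 pair;		/* allocated as an even/odd GPR pair */
	struct {
		unsigned long even;	/* comparison (old) value on input */
		unsigned long odd;	/* replacement (new) value */
	};
};
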
@@ -587,14 +586,12 @@ static inline void crdte(unsigned long old, unsigned long new,
unsigned long table, unsigned long dtt,
unsigned long address, unsigned long asce)
{
- register unsigned long reg2 asm("2") = old;
- register unsigned long reg3 asm("3") = new;
- register unsigned long reg4 asm("4") = table | dtt;
- register unsigned long reg5 asm("5") = address;
+ union register_pair r1 = { .even = old, .odd = new, };
+ union register_pair r2 = { .even = table | dtt, .odd = address, };
- asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
- : "+d" (reg2)
- : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
+ asm volatile(".insn rrf,0xb98f0000,%[r1],%[r2],%[asce],0"
+ : [r1] "+&d" (r1.pair)
+ : [r2] "d" (r2.pair), [asce] "a" (asce)
: "memory", "cc");
}
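
Note on the crdte() hunk: CRDTE consumes two register pairs, one holding the old/new table entry and one holding table-origin|dtt plus the address, so the four hard-coded registers collapse into two register_pair unions. A hypothetical caller sketch, with illustrative names not taken from this patch:

/* Atomically replace one DAT table entry and let CRDTE purge the matching
 * TLB entries for the given ASCE.
 */
static void replace_table_entry(unsigned long *entry, unsigned long new,
				unsigned long table, unsigned long dtt,
				unsigned long addr, unsigned long asce)
{
	crdte(*entry, new, table, dtt, addr, asce);
}
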
@@ -862,6 +859,25 @@ static inline int pte_unused(pte_t pte)
}
/*
+ * Extract the pgprot value from the given pte while at the same time making it
+ * usable for kernel address space mappings where fault driven dirty and
+ * young/old accounting is not supported, i.e _PAGE_PROTECT and _PAGE_INVALID
+ * must not be set.
+ */
+static inline pgprot_t pte_pgprot(pte_t pte)
+{
+ unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;
+
+ if (pte_write(pte))
+ pte_flags |= pgprot_val(PAGE_KERNEL);
+ else
+ pte_flags |= pgprot_val(PAGE_KERNEL_RO);
+ pte_flags |= pte_val(pte) & mio_wb_bit_mask;
+
+ return __pgprot(pte_flags);
+}
+
+/*
* pgd/pmd/pte modification functions
*/
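
Note on the new pte_pgprot() helper: it masks the pte with _PAGE_CHG_MASK, merges in PAGE_KERNEL or PAGE_KERNEL_RO depending on pte_write(), and carries over the MIO write-back bit, so the result can be fed straight back into a kernel address space mapping. A hypothetical usage sketch, assuming the existing mk_pte_phys() helper from this header; the wrapper name is illustrative and not part of this patch:

/* Rebuild a kernel PTE for a different physical page while keeping the
 * protection and cachability attributes of an existing mapping.
 */
static inline pte_t clone_kernel_pte(pte_t old, unsigned long physpage)
{
	return mk_pte_phys(physpage, pte_pgprot(old));
}
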