Diffstat (limited to 'arch/parisc/kernel/entry.S')
-rw-r--r--  arch/parisc/kernel/entry.S  127
1 file changed, 81 insertions(+), 46 deletions(-)
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index ae03b8679696..e04c5d806c10 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -36,6 +36,24 @@
.level 2.0
#endif
+/*
+ * We need seven instructions after a TLB insert for it to take effect.
+ * The PA8800/PA8900 processors are an exception and need 12 instructions.
+ * The RFI changes both IAOQ_Back and IAOQ_Front, so it counts as one.
+ */
+#ifdef CONFIG_64BIT
+#define NUM_PIPELINE_INSNS 12
+#else
+#define NUM_PIPELINE_INSNS 7
+#endif
+
+ /* Insert num nops */
+ .macro insert_nops num
+ .rept \num
+ nop
+ .endr
+ .endm
+
/* Get aligned page_table_lock address for this mm from cr28/tr4 */
.macro get_ptl reg
mfctl %cr28,\reg
@@ -415,24 +433,20 @@
3:
.endm
- /* Release page_table_lock without reloading lock address.
- We use an ordered store to ensure all prior accesses are
- performed prior to releasing the lock. */
- .macro ptl_unlock0 spc,tmp,tmp2
+ /* Release page_table_lock if for user space. We use an ordered
+ store to ensure all prior accesses are performed prior to
+ releasing the lock. Note stw may not be executed, so we
+ provide one extra nop when CONFIG_TLB_PTLOCK is defined. */
+ .macro ptl_unlock spc,tmp,tmp2
#ifdef CONFIG_TLB_PTLOCK
-98: ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2
+98: get_ptl \tmp
+ ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2
or,COND(=) %r0,\spc,%r0
stw,ma \tmp2,0(\tmp)
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
-#endif
- .endm
-
- /* Release page_table_lock. */
- .macro ptl_unlock1 spc,tmp,tmp2
-#ifdef CONFIG_TLB_PTLOCK
-98: get_ptl \tmp
- ptl_unlock0 \spc,\tmp,\tmp2
-99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
+ insert_nops NUM_PIPELINE_INSNS - 4
+#else
+ insert_nops NUM_PIPELINE_INSNS - 1
#endif
.endm
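
For anyone checking the nop budgets: the rfir issued right after this macro at every call site counts as one of the NUM_PIPELINE_INSNS instructions, and the stw,ma is deliberately left out of the count because the or,COND(=) in front of it may nullify it (the "one extra nop" the comment mentions). A minimal sketch of that arithmetic, assuming get_ptl expands to a single mfctl; the helper below is illustrative C, not kernel code:

	#include <assert.h>

	/*
	 * Count the instructions guaranteed to execute between the TLB
	 * insert and the end of the rfir, mirroring the two branches of
	 * ptl_unlock.  The stw,ma is not counted since it may be nullified.
	 */
	static int insns_after_tlb_insert(int num_pipeline_insns, int ptlock)
	{
		if (ptlock)
			/* get_ptl + ldi + or,COND(=) + nops + rfir */
			return 3 + (num_pipeline_insns - 4) + 1;
		/* nops + rfir */
		return (num_pipeline_insns - 1) + 1;
	}

	int main(void)
	{
		assert(insns_after_tlb_insert(7, 1) == 7);	/* 32-bit */
		assert(insns_after_tlb_insert(7, 0) == 7);
		assert(insns_after_tlb_insert(12, 1) == 12);	/* PA8800/PA8900 */
		assert(insns_after_tlb_insert(12, 0) == 12);
		return 0;
	}

The same N - 1 budget is reused before each bare rfir in the *_check_alias_* paths further down, where no lock is held.
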
@@ -461,13 +475,13 @@
* to a CPU TLB 4k PFN (4k => 12 bits to shift) */
#define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
#define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12)
+ #define PFN_START_BIT (63-ASM_PFN_PTE_SHIFT+(63-58)-PAGE_ADD_SHIFT)
/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
.macro convert_for_tlb_insert20 pte,tmp
#ifdef CONFIG_HUGETLB_PAGE
copy \pte,\tmp
- extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
- 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
+ extrd,u \tmp,PFN_START_BIT,PFN_START_BIT+1,\pte
depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
(63-58)+PAGE_ADD_SHIFT,\pte
@@ -475,8 +489,7 @@
depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
#else /* Huge pages disabled */
- extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
- 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
+ extrd,u \pte,PFN_START_BIT,PFN_START_BIT+1,\pte
depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
(63-58)+PAGE_ADD_SHIFT,\pte
#endif
@@ -486,6 +499,12 @@
* this happens is quite subtle, read below */
.macro make_insert_tlb spc,pte,prot,tmp
space_to_prot \spc \prot /* create prot id from space */
+
+#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT
+ /* need to drop DMB bit, as it's used as SPECIAL flag */
+ depi 0,_PAGE_SPECIAL_BIT,1,\pte
+#endif
+
/* The following is the real subtlety. This is depositing
* T <-> _PAGE_REFTRAP
* D <-> _PAGE_DIRTY
@@ -498,17 +517,18 @@
* Finally, _PAGE_READ goes in the top bit of PL1 (so we
* trigger an access rights trap in user space if the user
* tries to read an unreadable page */
-#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT
- /* need to drop DMB bit, as it's used as SPECIAL flag */
- depi 0,_PAGE_SPECIAL_BIT,1,\pte
-#endif
depd \pte,8,7,\prot
/* PAGE_USER indicates the page can be read with user privileges,
* so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
- * contains _PAGE_READ) */
+ * contains _PAGE_READ). While the kernel can't directly write
+ * user pages which have _PAGE_WRITE zero, it can read pages
+ * which have _PAGE_READ zero (PL <= PL1). Thus, the kernel
+ * exception fault handler doesn't trigger when reading pages
+ * that aren't user read accessible */
extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0
depdi 7,11,3,\prot
+
/* If we're a gateway page, drop PL2 back to zero for promotion
* to kernel privilege (so we can execute the page as kernel).
* Any privilege promotion page always denys read and write */
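
The privilege trick described above can be spot-checked with a toy model built only from the rules the comment itself states: a read is permitted when the access privilege level PL is numerically <= PL1, user code runs at PL 3 and kernel code at PL 0, and for a page with _PAGE_USER set the macro leaves PL1 = (_PAGE_READ << 1) | 1. The C below is purely illustrative, not kernel code:

	#include <assert.h>

	/* PL1 for a user page: _PAGE_READ in the upper bit, 1 in the lower. */
	static int read_allowed(int pl, int page_read)
	{
		int pl1 = (page_read << 1) | 1;

		return pl <= pl1;	/* PA-RISC: read permitted when PL <= PL1 */
	}

	int main(void)
	{
		assert(read_allowed(3, 1));	/* user reads a readable page */
		assert(!read_allowed(3, 0));	/* user read of !_PAGE_READ page traps */
		assert(read_allowed(0, 0));	/* kernel may still read it (0 <= 1) */
		assert(read_allowed(0, 1));
		return 0;
	}
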
@@ -1038,23 +1058,26 @@ ENTRY_CFI(intr_save) /* for os_hpmc */
STREG %r16, PT_ISR(%r29)
STREG %r17, PT_IOR(%r29)
-#if 0 && defined(CONFIG_64BIT)
- /* Revisit when we have 64-bit code above 4Gb */
- b,n intr_save2
-
+#if defined(CONFIG_64BIT)
skip_save_ior:
/* We have a itlb miss, and when executing code above 4 Gb on ILP64, we
* need to adjust iasq/iaoq here in the same way we adjusted isr/ior
* above.
*/
- extrd,u,* %r8,PSW_W_BIT,1,%r1
- cmpib,COND(=),n 1,%r1,intr_save2
+ bb,COND(>=),n %r8,PSW_W_BIT,intr_save2
LDREG PT_IASQ0(%r29), %r16
LDREG PT_IAOQ0(%r29), %r17
- /* adjust iasq/iaoq */
+ /* adjust iasq0/iaoq0 */
space_adjust %r16,%r17,%r1
STREG %r16, PT_IASQ0(%r29)
STREG %r17, PT_IAOQ0(%r29)
+
+ LDREG PT_IASQ1(%r29), %r16
+ LDREG PT_IAOQ1(%r29), %r17
+ /* adjust iasq1/iaoq1 */
+ space_adjust %r16,%r17,%r1
+ STREG %r16, PT_IASQ1(%r29)
+ STREG %r17, PT_IAOQ1(%r29)
#else
skip_save_ior:
#endif
@@ -1124,7 +1147,7 @@ dtlb_miss_20w:
idtlbt pte,prot
- ptl_unlock1 spc,t0,t1
+ ptl_unlock spc,t0,t1
rfir
nop
@@ -1133,6 +1156,7 @@ dtlb_check_alias_20w:
idtlbt pte,prot
+ insert_nops NUM_PIPELINE_INSNS - 1
rfir
nop
@@ -1150,7 +1174,7 @@ nadtlb_miss_20w:
idtlbt pte,prot
- ptl_unlock1 spc,t0,t1
+ ptl_unlock spc,t0,t1
rfir
nop
@@ -1159,6 +1183,7 @@ nadtlb_check_alias_20w:
idtlbt pte,prot
+ insert_nops NUM_PIPELINE_INSNS - 1
rfir
nop
@@ -1184,7 +1209,7 @@ dtlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
- ptl_unlock1 spc,t0,t1
+ ptl_unlock spc,t0,t1
rfir
nop
@@ -1194,6 +1219,7 @@ dtlb_check_alias_11:
idtlba pte,(va)
idtlbp prot,(va)
+ insert_nops NUM_PIPELINE_INSNS - 1
rfir
nop
@@ -1217,7 +1243,7 @@ nadtlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
- ptl_unlock1 spc,t0,t1
+ ptl_unlock spc,t0,t1
rfir
nop
@@ -1227,6 +1253,7 @@ nadtlb_check_alias_11:
idtlba pte,(va)
idtlbp prot,(va)
+ insert_nops NUM_PIPELINE_INSNS - 1
rfir
nop
@@ -1246,7 +1273,7 @@ dtlb_miss_20:
idtlbt pte,prot
- ptl_unlock1 spc,t0,t1
+ ptl_unlock spc,t0,t1
rfir
nop
@@ -1255,6 +1282,7 @@ dtlb_check_alias_20:
idtlbt pte,prot
+ insert_nops NUM_PIPELINE_INSNS - 1
rfir
nop
@@ -1274,7 +1302,7 @@ nadtlb_miss_20:
idtlbt pte,prot
- ptl_unlock1 spc,t0,t1
+ ptl_unlock spc,t0,t1
rfir
nop
@@ -1283,6 +1311,7 @@ nadtlb_check_alias_20:
idtlbt pte,prot
+ insert_nops NUM_PIPELINE_INSNS - 1
rfir
nop
@@ -1319,7 +1348,7 @@ itlb_miss_20w:
iitlbt pte,prot
- ptl_unlock1 spc,t0,t1
+ ptl_unlock spc,t0,t1
rfir
nop
@@ -1343,7 +1372,7 @@ naitlb_miss_20w:
iitlbt pte,prot
- ptl_unlock1 spc,t0,t1
+ ptl_unlock spc,t0,t1
rfir
nop
@@ -1352,6 +1381,7 @@ naitlb_check_alias_20w:
iitlbt pte,prot
+ insert_nops NUM_PIPELINE_INSNS - 1
rfir
nop
@@ -1377,7 +1407,7 @@ itlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
- ptl_unlock1 spc,t0,t1
+ ptl_unlock spc,t0,t1
rfir
nop
@@ -1401,7 +1431,7 @@ naitlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
- ptl_unlock1 spc,t0,t1
+ ptl_unlock spc,t0,t1
rfir
nop
@@ -1411,6 +1441,7 @@ naitlb_check_alias_11:
iitlba pte,(%sr0, va)
iitlbp prot,(%sr0, va)
+ insert_nops NUM_PIPELINE_INSNS - 1
rfir
nop
@@ -1431,7 +1462,7 @@ itlb_miss_20:
iitlbt pte,prot
- ptl_unlock1 spc,t0,t1
+ ptl_unlock spc,t0,t1
rfir
nop
@@ -1451,7 +1482,7 @@ naitlb_miss_20:
iitlbt pte,prot
- ptl_unlock1 spc,t0,t1
+ ptl_unlock spc,t0,t1
rfir
nop
@@ -1460,6 +1491,7 @@ naitlb_check_alias_20:
iitlbt pte,prot
+ insert_nops NUM_PIPELINE_INSNS - 1
rfir
nop
@@ -1481,7 +1513,7 @@ dbit_trap_20w:
idtlbt pte,prot
- ptl_unlock0 spc,t0,t1
+ ptl_unlock spc,t0,t1
rfir
nop
#else
@@ -1507,7 +1539,7 @@ dbit_trap_11:
mtsp t1, %sr1 /* Restore sr1 */
- ptl_unlock0 spc,t0,t1
+ ptl_unlock spc,t0,t1
rfir
nop
@@ -1527,7 +1559,7 @@ dbit_trap_20:
idtlbt pte,prot
- ptl_unlock0 spc,t0,t1
+ ptl_unlock spc,t0,t1
rfir
nop
#endif
@@ -1814,6 +1846,10 @@ syscall_restore_rfi:
extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
depi -1,7,1,%r20 /* T bit */
+#ifdef CONFIG_64BIT
+ extru,<> %r19,TIF_32BIT_PA_BIT,1,%r0
+ depi -1,4,1,%r20 /* W bit */
+#endif
STREG %r20,TASK_PT_PSW(%r1)
/* Always store space registers, since sr3 can be changed (e.g. fork) */
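
The new CONFIG_64BIT block mirrors the T-bit lines just above it: test a thread flag held in %r19 and, depending on it, deposit one more bit into the PSW image in %r20 before it is written back to TASK_PT_PSW. A rough C sketch of the intent, assuming the PSW_T/PSW_W masks from asm/psw.h and the usual meaning of TIF_BLOCKSTEP/TIF_32BIT; this is illustrative, not the kernel's implementation:

	#include <asm/psw.h>		/* PSW_T, PSW_W */
	#include <linux/thread_info.h>	/* test_thread_flag(), TIF_* */

	/* PSW bits re-added for a traced task before the rfi restores it. */
	static unsigned long traced_psw_bits(void)
	{
		unsigned long psw = 0;

		if (test_thread_flag(TIF_BLOCKSTEP))
			psw |= PSW_T;		/* taken-branch trap enable */
	#ifdef CONFIG_64BIT
		if (!test_thread_flag(TIF_32BIT))
			psw |= PSW_W;		/* wide mode for 64-bit userspace */
	#endif
		return psw;
	}

Without this block, the PSW rebuilt here for a 64-bit task would have the W (wide) bit clear; the "XXX W bit???" comment removed in the following hunk was pointing at exactly that gap.
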
@@ -1827,7 +1863,6 @@ syscall_restore_rfi:
STREG %r25,TASK_PT_IASQ0(%r1)
STREG %r25,TASK_PT_IASQ1(%r1)
- /* XXX W bit??? */
/* Now if old D bit is clear, it means we didn't save all registers
* on syscall entry, so do that now. This only happens on TRACEME
* calls, or if someone attached to us while we were on a syscall.