Diffstat (limited to 'arch/sparc/include/asm/pgtsrmmu.h')
 arch/sparc/include/asm/pgtsrmmu.h | 20 ++++++--------------
 1 file changed, 6 insertions(+), 14 deletions(-)
diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
index 6067925972d9..a265822a475e 100644
--- a/arch/sparc/include/asm/pgtsrmmu.h
+++ b/arch/sparc/include/asm/pgtsrmmu.h
@@ -10,7 +10,7 @@
#include <asm/page.h>
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#include <asm/thread_info.h> /* TI_UWINMASK for WINDOW_FLUSH */
#endif
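
[Note] This hunk (and the two #ifndef hunks further down) replaces the kernel's hand-maintained __ASSEMBLY__ guard with __ASSEMBLER__, which GCC and Clang predefine automatically whenever they preprocess an assembly source, so the guard no longer depends on the build system passing a -D__ASSEMBLY__ definition. A minimal, hypothetical sketch of how such a guard splits a header shared between C and .S files (not code from this file):

/* example.h -- hypothetical header shared by C and assembly sources */
#ifndef _EXAMPLE_H
#define _EXAMPLE_H

#define EXAMPLE_FLAG	0x40		/* plain macros work in both worlds */

#ifdef __ASSEMBLER__
/* assembly-only definitions, e.g. helpers used by .S entry code */
#define EXAMPLE_ASM_TMP	%g1
#endif

#ifndef __ASSEMBLER__
/* C-only declarations; hidden when the header is pulled into .S files */
extern unsigned long example_state;
#endif

#endif /* !(_EXAMPLE_H) */
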
@@ -53,21 +53,13 @@
#define SRMMU_CHG_MASK (0xffffff00 | SRMMU_REF | SRMMU_DIRTY)
-/* SRMMU swap entry encoding
- *
- * We use 5 bits for the type and 19 for the offset. This gives us
- * 32 swapfiles of 4GB each. Encoding looks like:
- *
- * oooooooooooooooooootttttRRRRRRRR
- * fedcba9876543210fedcba9876543210
- *
- * The bottom 7 bits are reserved for protection and status bits, especially
- * PRESENT.
- */
+/* SRMMU swap entry encoding */
#define SRMMU_SWP_TYPE_MASK 0x1f
#define SRMMU_SWP_TYPE_SHIFT 7
#define SRMMU_SWP_OFF_MASK 0xfffff
#define SRMMU_SWP_OFF_SHIFT (SRMMU_SWP_TYPE_SHIFT + 5)
+/* We borrow bit 6 to store the exclusive marker in swap PTEs. */
+#define SRMMU_SWP_EXCLUSIVE SRMMU_DIRTY
/* Some day I will implement true fine grained access bits for
* user pages because the SRMMU gives us the capabilities to
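
[Note] The hunk above keeps the swap-entry layout (5 type bits starting at bit 7, 20 offset bits starting at bit 12, the low 7 bits reserved for status such as PRESENT) and additionally reuses bit 6, normally SRMMU_DIRTY, as the exclusive marker in swap PTEs. The kernel defines its real accessors for this layout elsewhere in the sparc32 pgtable code; the standalone sketch below only models the bit packing, and swp_pack() is a local helper name, not a kernel function:

/* swp_layout.c -- standalone model of the SRMMU swap-PTE bit layout above */
#include <stdio.h>

#define SRMMU_DIRTY		0x40	/* bit 6, doubles as the exclusive marker */
#define SRMMU_SWP_TYPE_MASK	0x1f
#define SRMMU_SWP_TYPE_SHIFT	7
#define SRMMU_SWP_OFF_MASK	0xfffff
#define SRMMU_SWP_OFF_SHIFT	(SRMMU_SWP_TYPE_SHIFT + 5)
#define SRMMU_SWP_EXCLUSIVE	SRMMU_DIRTY

/* Pack a (swap type, page offset) pair into a swap PTE value. */
static unsigned long swp_pack(unsigned long type, unsigned long off)
{
	return ((type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT) |
	       ((off & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT);
}

int main(void)
{
	/* swap device 3, page offset 0x1234, marked exclusive via bit 6 */
	unsigned long pte = swp_pack(3, 0x1234) | SRMMU_SWP_EXCLUSIVE;

	printf("type=%lu off=0x%lx exclusive=%d\n",
	       (pte >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK,
	       (pte >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK,
	       !!(pte & SRMMU_SWP_EXCLUSIVE));
	return 0;
}
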
@@ -105,7 +97,7 @@
bne 99b; \
restore %g0, %g0, %g0;
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
extern unsigned long last_valid_pfn;
/* This makes sense. Honest it does - Anton */
@@ -144,6 +136,6 @@ srmmu_get_pte (unsigned long addr)
return entry;
}
-#endif /* !(__ASSEMBLY__) */
+#endif /* !(__ASSEMBLER__) */
#endif /* !(_SPARC_PGTSRMMU_H) */