author     Paul Burton <paul.burton@imgtec.com>      2016-04-19 09:25:05 +0100
committer  Ralf Baechle <ralf@linux-mips.org>        2016-05-13 15:30:25 +0200
commit     7b2cb64f91f25a7293b10054e20d1c0734ffab6f (patch)
tree       6631b4d6dc4e3294bfecd1835000578ef374bd0f /arch/mips/mm/tlbex.c
parent     745f35587846249b392aa548b4c5f54cd69ed688 (diff)
MIPS: mm: Fix MIPS32 36b physical addressing (alchemy, netlogic)
There are 2 distinct cases in which a kernel for a MIPS32 CPU
(CONFIG_CPU_MIPS32=y) may use 64 bit physical addresses
(CONFIG_PHYS_ADDR_T_64BIT=y):

  - 36 bit physical addressing as used by RMI Alchemy & Netlogic
    XLP/XLR CPUs.

  - MIPS32r5 eXtended Physical Addressing (XPA).

These 2 cases are distinct in that they require different behaviour
from the kernel - the EntryLo registers have different formats. Until
Linux v4.1 we only supported the first case, with code conditional upon
the 2 aforementioned Kconfig variables being set. Commit c5b367835cfc
("MIPS: Add support for XPA.") added support for the second case, but
did so by modifying the code that existed for the first case rather
than treating the 2 cases as distinct. Since the EntryLo registers have
different formats this breaks the 36 bit Alchemy/XLP/XLR case.

Fix this by splitting the 2 cases, with XPA cases now being conditional
upon CONFIG_XPA and the non-XPA case matching the code as it existed
prior to commit c5b367835cfc ("MIPS: Add support for XPA.").

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Reported-by: Manuel Lauss <manuel.lauss@gmail.com>
Tested-by: Manuel Lauss <manuel.lauss@gmail.com>
Fixes: c5b367835cfc ("MIPS: Add support for XPA.")
Cc: James Hogan <james.hogan@imgtec.com>
Cc: David Daney <david.daney@cavium.com>
Cc: Huacai Chen <chenhc@lemote.com>
Cc: Maciej W. Rozycki <macro@linux-mips.org>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: David Hildenbrand <dahi@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Alex Smith <alex.smith@imgtec.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: stable@vger.kernel.org # v4.1+
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/13119/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
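[Editor's note] To make the two cases concrete, below is a rough plain-C
paraphrase of the EntryLo update sequences that build_update_entries()
emits through the uasm micro-assembler after this patch; the diff below
is the authoritative code. The helpers entrylo_lo()/entrylo_hi() and the
two *_update_entries() wrappers are invented for this sketch and
simplify away the _PAGE_GLOBAL rotation and masking the generated code
performs; write_c0_entrylo*()/writex_c0_entrylo*() are intended to
correspond to the kernel's mtc0/mthc0 CP0 accessors.

#include <linux/types.h>
#include <asm/mipsregs.h>	/* write_c0_entrylo*(), writex_c0_entrylo*() */

/* Split a precomputed EntryLo value into the two 32 bit halves of the
 * 64 bit XPA register; the real handler derives these from the even/odd
 * PTEs with loads, rotates and masks. */
static inline u32 entrylo_lo(u64 entrylo) { return (u32)entrylo; }
static inline u32 entrylo_hi(u64 entrylo) { return (u32)(entrylo >> 32); }

/* Case 1: MIPS32r5 XPA.  EntryLo0/1 are 64 bits wide and are written in
 * two halves: mtc0 for bits 31:0, mthc0 for bits 63:32. */
static void xpa_update_entries(u64 entrylo0, u64 entrylo1)
{
	write_c0_entrylo0(entrylo_lo(entrylo0));	/* mtc0  */
	write_c0_entrylo1(entrylo_lo(entrylo1));
	writex_c0_entrylo0(entrylo_hi(entrylo0));	/* mthc0 */
	writex_c0_entrylo1(entrylo_hi(entrylo1));
}

/* Case 2: 36 bit Alchemy/Netlogic XLP/XLR.  EntryLo0/1 stay 32 bits
 * wide, so one 32 bit word of each 64 bit PTE is already in EntryLo
 * format and a single mtc0 per register suffices - the pre-v4.1
 * behaviour this patch restores for non-XPA configurations. */
static void phys36_update_entries(u32 entrylo0, u32 entrylo1)
{
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
}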
Diffstat (limited to 'arch/mips/mm/tlbex.c')
-rw-r--r--  arch/mips/mm/tlbex.c  |  35
1 file changed, 23 insertions(+), 12 deletions(-)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 67966f30c522..db4adf9cc65c 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1011,25 +1011,21 @@ static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
{
-	/*
-	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
-	 * Kernel is a special case. Only a few CPUs use it.
-	 */
-	if (config_enabled(CONFIG_PHYS_ADDR_T_64BIT) && !cpu_has_64bits) {
+	if (config_enabled(CONFIG_XPA)) {
		int pte_off_even = sizeof(pte_t) / 2;
		int pte_off_odd = pte_off_even + sizeof(pte_t);
-#ifdef CONFIG_XPA
		const int scratch = 1; /* Our extra working register */
		uasm_i_addu(p, scratch, 0, ptep);
-#endif
+
		uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
-		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */
		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
-		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
		UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
+
+		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */
+		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
		UASM_i_MTC0(p, ptep, C0_ENTRYLO1);
-#ifdef CONFIG_XPA
+
		uasm_i_lw(p, tmp, 0, scratch);
		uasm_i_lw(p, ptep, sizeof(pte_t), scratch);
		uasm_i_lui(p, scratch, 0xff);
@@ -1038,7 +1034,22 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
		uasm_i_and(p, ptep, scratch, ptep);
		uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
		uasm_i_mthc0(p, ptep, C0_ENTRYLO1);
-#endif
+		return;
+	}
+
+	/*
+	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
+	 * Kernel is a special case. Only a few CPUs use it.
+	 */
+	if (config_enabled(CONFIG_PHYS_ADDR_T_64BIT) && !cpu_has_64bits) {
+		int pte_off_even = sizeof(pte_t) / 2;
+		int pte_off_odd = pte_off_even + sizeof(pte_t);
+
+		uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
+		UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
+
+		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */
+		UASM_i_MTC0(p, ptep, C0_ENTRYLO1);
		return;
	}
@@ -1637,7 +1648,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
-	if (!cpu_has_64bits) {
+	if (config_enabled(CONFIG_XPA) && !cpu_has_64bits) {
		const int scratch = 1; /* Our extra working register */
		uasm_i_lui(p, scratch, (mode >> 16));