Diffstat (limited to 'arch/nds32')
-rw-r--r--  arch/nds32/include/asm/highmem.h |  1
-rw-r--r--  arch/nds32/include/asm/pgtable.h | 22
-rw-r--r--  arch/nds32/kernel/head.S         |  2
-rw-r--r--  arch/nds32/kernel/module.c       |  2
-rw-r--r--  arch/nds32/kernel/traps.c        | 15
-rw-r--r--  arch/nds32/kernel/vdso.c         |  6
-rw-r--r--  arch/nds32/mm/fault.c            | 17
-rw-r--r--  arch/nds32/mm/init.c             | 13
-rw-r--r--  arch/nds32/mm/proc.c             |  7
9 files changed, 28 insertions, 57 deletions
diff --git a/arch/nds32/include/asm/highmem.h b/arch/nds32/include/asm/highmem.h
index 5717647d14d1..fe986d0e6e3f 100644
--- a/arch/nds32/include/asm/highmem.h
+++ b/arch/nds32/include/asm/highmem.h
@@ -7,7 +7,6 @@
#include <asm/proc-fns.h>
#include <asm/kmap_types.h>
#include <asm/fixmap.h>
-#include <asm/pgtable.h>
/*
* Right now we initialize only a single pte table. It can be extended
diff --git a/arch/nds32/include/asm/pgtable.h b/arch/nds32/include/asm/pgtable.h
index 476cc4dd1709..419f984eef70 100644
--- a/arch/nds32/include/asm/pgtable.h
+++ b/arch/nds32/include/asm/pgtable.h
@@ -186,16 +186,10 @@ extern void paging_init(void);
#define pte_clear(mm,addr,ptep) set_pte_at((mm),(addr),(ptep), __pte(0))
#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
-#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) ((pte_t *)pmd_page_kernel(*(dir)) + pte_index(address))
-#define pte_offset_map(dir, address) ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
-#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
-#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-
-#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
-
-#define pmd_off_k(address) pmd_offset(pud_offset(p4d_offset(pgd_offset_k(address), (address)), (address)), (address))
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK));
+}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
/*
@@ -346,12 +340,6 @@ static inline pmd_t __mk_pmd(pte_t * ptep, unsigned long prot)
*
*/
-/* to find an entry in a page-table-directory */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
-
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
const unsigned long mask = 0xfff;
@@ -374,8 +362,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) (1)
-#include <asm-generic/pgtable.h>
-
/*
* We provide our own arch_get_unmapped_area to cope with VIPT caches.
*/
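
With the per-architecture pte_index()/pte_offset_*() macros removed here, nds32 picks up the generic definitions from <linux/pgtable.h>, which are built on top of the pmd_page_vaddr() helper added above. As a rough sketch (approximating the generic helpers, not code from this patch):

static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	/* pmd_page_vaddr() yields the kernel virtual base of the PTE page. */
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
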
diff --git a/arch/nds32/kernel/head.S b/arch/nds32/kernel/head.S
index fcefb62606ca..7347f00451a9 100644
--- a/arch/nds32/kernel/head.S
+++ b/arch/nds32/kernel/head.S
@@ -3,10 +3,10 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <linux/sizes.h>
#include <asm/thread_info.h>
diff --git a/arch/nds32/kernel/module.c b/arch/nds32/kernel/module.c
index 1e31829cbc2a..3897fd14a21d 100644
--- a/arch/nds32/kernel/module.c
+++ b/arch/nds32/kernel/module.c
@@ -5,7 +5,7 @@
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/moduleloader.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
void *module_alloc(unsigned long size)
{
diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c
index f4d386b52622..6a9772ba7392 100644
--- a/arch/nds32/kernel/traps.c
+++ b/arch/nds32/kernel/traps.c
@@ -97,18 +97,19 @@ static void dump_instr(struct pt_regs *regs)
}
#define LOOP_TIMES (100)
-static void __dump(struct task_struct *tsk, unsigned long *base_reg)
+static void __dump(struct task_struct *tsk, unsigned long *base_reg,
+ const char *loglvl)
{
unsigned long ret_addr;
int cnt = LOOP_TIMES, graph = 0;
- pr_emerg("Call Trace:\n");
+ printk("%sCall Trace:\n", loglvl);
if (!IS_ENABLED(CONFIG_FRAME_POINTER)) {
while (!kstack_end(base_reg)) {
ret_addr = *base_reg++;
if (__kernel_text_address(ret_addr)) {
ret_addr = ftrace_graph_ret_addr(
tsk, &graph, ret_addr, NULL);
- print_ip_sym(ret_addr);
+ print_ip_sym(loglvl, ret_addr);
}
if (--cnt < 0)
break;
@@ -124,17 +125,17 @@ static void __dump(struct task_struct *tsk, unsigned long *base_reg)
ret_addr = ftrace_graph_ret_addr(
tsk, &graph, ret_addr, NULL);
- print_ip_sym(ret_addr);
+ print_ip_sym(loglvl, ret_addr);
}
if (--cnt < 0)
break;
base_reg = (unsigned long *)next_fp;
}
}
- pr_emerg("\n");
+ printk("%s\n", loglvl);
}
-void show_stack(struct task_struct *tsk, unsigned long *sp)
+void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
unsigned long *base_reg;
@@ -151,7 +152,7 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
else
__asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(base_reg));
}
- __dump(tsk, base_reg);
+ __dump(tsk, base_reg, loglvl);
barrier();
}
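
The extra loglvl argument lets each caller choose the severity of the backtrace instead of the hard-coded pr_emerg(). A minimal usage sketch (illustrative call sites, not part of this diff):

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>	/* show_stack() */

/* Illustrative only: dump the current task's stack at two severities. */
static void example_backtrace(void)
{
	show_stack(current, NULL, KERN_EMERG);	/* fatal-error path */
	show_stack(current, NULL, KERN_INFO);	/* informational dump */
}
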
diff --git a/arch/nds32/kernel/vdso.c b/arch/nds32/kernel/vdso.c
index 90bcae6f8554..e16009a07971 100644
--- a/arch/nds32/kernel/vdso.c
+++ b/arch/nds32/kernel/vdso.c
@@ -130,7 +130,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
vdso_mapping_len += L1_cache_info[DCACHE].aliasing_num - 1;
#endif
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
addr = vdso_random_addr(vdso_mapping_len);
@@ -185,12 +185,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
goto up_fail;
}
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return 0;
up_fail:
mm->context.vdso = NULL;
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
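
The open-coded rwsem operations on mm->mmap_sem are replaced by the mmap locking API from <linux/mmap_lock.h>. The write-side wrappers behave roughly as below (a sketch; the underlying field is the mm's rwsem, renamed to mmap_lock by the same series):

static inline void mmap_write_lock(struct mm_struct *mm)
{
	down_write(&mm->mmap_lock);
}

static inline int mmap_write_lock_killable(struct mm_struct *mm)
{
	return down_write_killable(&mm->mmap_lock);
}

static inline void mmap_write_unlock(struct mm_struct *mm)
{
	up_write(&mm->mmap_lock);
}
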
diff --git a/arch/nds32/mm/fault.c b/arch/nds32/mm/fault.c
index f331e533edc2..8fb73f6401a0 100644
--- a/arch/nds32/mm/fault.c
+++ b/arch/nds32/mm/fault.c
@@ -11,7 +11,6 @@
#include <linux/uaccess.h>
#include <linux/perf_event.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
extern void die(const char *str, struct pt_regs *regs, long err);
@@ -127,12 +126,12 @@ void do_page_fault(unsigned long entry, unsigned long addr,
* validly references user space from well defined areas of the code,
* we can bug out early if this is from code which shouldn't.
*/
- if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+ if (unlikely(!mmap_read_trylock(mm))) {
if (!user_mode(regs) &&
!search_exception_tables(instruction_pointer(regs)))
goto no_context;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
} else {
/*
* The above down_read_trylock() might have succeeded in which
@@ -211,7 +210,7 @@ good_area:
/*
* If we need to retry but a fatal signal is pending, handle the
- * signal first. We do not need to release the mmap_sem because it
+ * signal first. We do not need to release the mmap_lock because it
* would already be released in __lock_page_or_retry in mm/filemap.c.
*/
if (fault_signal_pending(fault, regs)) {
@@ -248,7 +247,7 @@ good_area:
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
- /* No need to up_read(&mm->mmap_sem) as we would
+ /* No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -256,7 +255,7 @@ good_area:
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return;
/*
@@ -264,7 +263,7 @@ good_area:
* Fix it, but check if it's kernel or user first..
*/
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
bad_area_nosemaphore:
@@ -324,14 +323,14 @@ no_context:
*/
out_of_memory:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
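
The page-fault path uses the read-side wrappers of the same API. Roughly (a sketch of <linux/mmap_lock.h>, not code from this patch):

static inline void mmap_read_lock(struct mm_struct *mm)
{
	down_read(&mm->mmap_lock);
}

static inline bool mmap_read_trylock(struct mm_struct *mm)
{
	return down_read_trylock(&mm->mmap_lock) != 0;
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
	up_read(&mm->mmap_lock);
}
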
diff --git a/arch/nds32/mm/init.c b/arch/nds32/mm/init.c
index 91147cca4b64..fa86f7b2f416 100644
--- a/arch/nds32/mm/init.c
+++ b/arch/nds32/mm/init.c
@@ -98,9 +98,6 @@ static pmd_t *fixmap_pmd_p;
static void __init fixedrange_init(void)
{
unsigned long vaddr;
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
pmd_t *pmd;
#ifdef CONFIG_HIGHMEM
pte_t *pte;
@@ -110,10 +107,7 @@ static void __init fixedrange_init(void)
* Fixed mappings:
*/
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
- pgd = swapper_pg_dir + pgd_index(vaddr);
- p4d = p4d_offset(pgd, vaddr);
- pud = pud_offset(p4d, vaddr);
- pmd = pmd_offset(pud, vaddr);
+ pmd = pmd_off_k(vaddr);
fixmap_pmd_p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!fixmap_pmd_p)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
@@ -126,10 +120,7 @@ static void __init fixedrange_init(void)
*/
vaddr = PKMAP_BASE;
- pgd = swapper_pg_dir + pgd_index(vaddr);
- p4d = p4d_offset(pgd, vaddr);
- pud = pud_offset(p4d, vaddr);
- pmd = pmd_offset(pud, vaddr);
+ pmd = pmd_off_k(vaddr);
pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!pte)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
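
pmd_off_k() is now provided generically by <linux/pgtable.h>; it folds the whole pgd -> p4d -> pud -> pmd walk for a kernel virtual address into one call, replacing the open-coded walk deleted above. Approximately:

static inline pmd_t *pmd_off_k(unsigned long va)
{
	/* Walk init_mm's page tables down to the PMD covering va. */
	return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
}
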
diff --git a/arch/nds32/mm/proc.c b/arch/nds32/mm/proc.c
index 837ae7728830..848c845f5f33 100644
--- a/arch/nds32/mm/proc.c
+++ b/arch/nds32/mm/proc.c
@@ -5,7 +5,6 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/nds32.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/l2_cache.h>
@@ -16,14 +15,10 @@ extern struct cache_info L1_cache_info[2];
int va_kernel_present(unsigned long addr)
{
- p4d_t *p4d;
- pud_t *pud;
pmd_t *pmd;
pte_t *ptep, pte;
- p4d = p4d_offset(pgd_offset_k(addr), addr);
- pud = pud_offset(p4d, addr);
- pmd = pmd_offset(pud, addr);
+ pmd = pmd_off_k(addr);
if (!pmd_none(*pmd)) {
ptep = pte_offset_map(pmd, addr);
pte = *ptep;