 arch/xtensa/Kconfig.debug  |  10
 arch/xtensa/kernel/entry.S |   9
 arch/xtensa/mm/tlb.c       | 113
 3 files changed, 131 insertions(+), 1 deletion(-)
diff --git a/arch/xtensa/Kconfig.debug b/arch/xtensa/Kconfig.debug
index a34010e0e51c..af7da74d535f 100644
--- a/arch/xtensa/Kconfig.debug
+++ b/arch/xtensa/Kconfig.debug
@@ -2,6 +2,16 @@ menu "Kernel hacking"
 
 source "lib/Kconfig.debug"
 
+config DEBUG_TLB_SANITY
+	bool "Debug TLB sanity"
+	depends on DEBUG_KERNEL
+	help
+	  Enable this to turn on a TLB sanity check on each entry to
+	  userspace. This check can spot missing TLB invalidation, wrong
+	  PTE permissions, or premature page freeing.
+
+	  If unsure, say N.
+
 config LD_NO_RELAX
 	bool "Disable linker relaxation"
 	default n
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index fa94512ff84d..9298742f0fd0 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -458,7 +458,7 @@ common_exception_return:
 
 	_bbsi.l	a4, TIF_NEED_RESCHED, 3f
 	_bbsi.l	a4, TIF_NOTIFY_RESUME, 2f
-	_bbci.l	a4, TIF_SIGPENDING, 4f
+	_bbci.l	a4, TIF_SIGPENDING, 5f
 
 2:	l32i	a4, a1, PT_DEPC
 	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
@@ -476,6 +476,13 @@ common_exception_return:
 	callx4	a4
 	j	1b
 
+5:
+#ifdef CONFIG_DEBUG_TLB_SANITY
+	l32i	a4, a1, PT_DEPC
+	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
+	movi	a4, check_tlb_sanity
+	callx4	a4
+#endif
 4:	/* Restore optional registers. */
 
 	load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
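The entry.S change is the subtle part of the patch: the exit path used to branch straight from the "no signal pending" test to the register-restore code at label 4, and now goes through the new label 5 first, so check_tlb_sanity() runs on every normal return to userspace (double exceptions still skip it, just as they do at label 2). The C paraphrase below is only an illustration of that control flow, not kernel code; the helper names and TIF bit positions are stand-ins for the real ones.

/*
 * Illustrative C paraphrase of the common_exception_return flow above.
 * Helper names and flag bit positions are stand-ins, not kernel APIs.
 */
#include <stdbool.h>

#define TIF_NEED_RESCHED	(1u << 0)	/* placeholder bit positions */
#define TIF_NOTIFY_RESUME	(1u << 1)
#define TIF_SIGPENDING		(1u << 2)

extern unsigned current_ti_flags(void);	/* flags re-read at label 1 */
extern bool double_exception(void);	/* PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS */
extern void schedule(void);		/* called from label 3 */
extern void do_notify_resume(void);	/* called from label 2 */
extern void check_tlb_sanity(void);	/* label 5, added by this patch */

void exit_to_user_sketch(void)
{
	for (;;) {				/* label 1 */
		unsigned flags = current_ti_flags();

		if (flags & TIF_NEED_RESCHED) {
			schedule();		/* 3: then j 1b */
			continue;
		}
		if (flags & (TIF_NOTIFY_RESUME | TIF_SIGPENDING)) {
			if (double_exception())	/* 2: bgeui ..., 4f */
				break;
			do_notify_resume();	/* then j 1b */
			continue;
		}
		break;	/* _bbci TIF_SIGPENDING now lands on 5, not 4 */
	}
#ifdef CONFIG_DEBUG_TLB_SANITY
	if (!double_exception())		/* 5: the new check */
		check_tlb_sanity();
#endif
	/* 4: restore registers and return to userspace */
}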
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index 743346150eea..ca9d2366bf12 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -141,3 +141,116 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
 
 	local_irq_restore(flags);
 }
+
+#ifdef CONFIG_DEBUG_TLB_SANITY
+
+static unsigned get_pte_for_vaddr(unsigned vaddr)
+{
+	struct task_struct *task = get_current();
+	struct mm_struct *mm = task->mm;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	if (!mm)
+		mm = task->active_mm;
+	pgd = pgd_offset(mm, vaddr);
+	if (pgd_none_or_clear_bad(pgd))
+		return 0;
+	pmd = pmd_offset(pgd, vaddr);
+	if (pmd_none_or_clear_bad(pmd))
+		return 0;
+	pte = pte_offset_map(pmd, vaddr);
+	if (!pte)
+		return 0;
+	return pte_val(*pte);
+}
+
+enum {
+	TLB_SUSPICIOUS	= 1,
+	TLB_INSANE	= 2,
+};
+
+static void tlb_insane(void)
+{
+	BUG_ON(1);
+}
+
+static void tlb_suspicious(void)
+{
+	WARN_ON(1);
+}
+
+/*
+ * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
+ * and TLB entries with user ASID (>= 4) have VMA < TASK_SIZE.
+ *
+ * Check that valid TLB entries either have the same PA as the PTE, or the
+ * PTE is marked as non-present. A non-present PTE with a non-zero page
+ * refcount and zero mapcount is normal for a batched TLB flush. A zero
+ * refcount means the page was freed prematurely. A non-zero mapcount is
+ * unusual, but does not necessarily mean an error, thus marked as suspicious.
+ */
+static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
+{
+	unsigned tlbidx = w | (e << PAGE_SHIFT);
+	unsigned r0 = dtlb ?
+		read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
+	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
+	unsigned pte = get_pte_for_vaddr(vpn);
+	unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
+	unsigned tlb_asid = r0 & ASID_MASK;
+	bool kernel = tlb_asid == 1;
+	int rc = 0;
+
+	if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
+		pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
+				dtlb ? 'D' : 'I', w, e, vpn,
+				kernel ? "kernel" : "user");
+		rc |= TLB_INSANE;
+	}
+
+	if (tlb_asid == mm_asid) {
+		unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
+			read_itlb_translation(tlbidx);
+		if ((pte ^ r1) & PAGE_MASK) {
+			pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
+					dtlb ? 'D' : 'I', w, e, r0, r1, pte);
+			if (pte == 0 || !pte_present(__pte(pte))) {
+				struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
+				pr_err("page refcount: %d, mapcount: %d\n",
+						page_count(p),
+						page_mapcount(p));
+				if (!page_count(p))
+					rc |= TLB_INSANE;
+				else if (page_mapped(p))
+					rc |= TLB_SUSPICIOUS;
+			} else {
+				rc |= TLB_INSANE;
+			}
+		}
+	}
+	return rc;
+}
+
+void check_tlb_sanity(void)
+{
+	unsigned long flags;
+	unsigned w, e;
+	int bug = 0;
+
+	local_irq_save(flags);
+	for (w = 0; w < DTLB_ARF_WAYS; ++w)
+		for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
+			bug |= check_tlb_entry(w, e, true);
+	for (w = 0; w < ITLB_ARF_WAYS; ++w)
+		for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
+			bug |= check_tlb_entry(w, e, false);
+	if (bug & TLB_INSANE)
+		tlb_insane();
+	if (bug & TLB_SUSPICIOUS)
+		tlb_suspicious();
+	local_irq_restore(flags);
+}
+
+#endif /* CONFIG_DEBUG_TLB_SANITY */
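To see the index arithmetic in check_tlb_entry() in isolation: the way number lives in the low bits of the probe index, the entry number supplies the VPN index bits, and the VPN is rebuilt from the tag read back from the TLB plus those same entry bits. The standalone sketch below demonstrates this with made-up values; PAGE_SHIFT of 12 (4 KiB pages) and an 8-bit ASID field are assumptions chosen for illustration, not taken from the patch.

/* Standalone demo of the tlbidx/VPN arithmetic used in check_tlb_entry().
 * PAGE_SHIFT and ASID_MASK values are assumed, not taken from the patch. */
#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed 4 KiB pages */
#define PAGE_MASK	(~((1u << PAGE_SHIFT) - 1))
#define ASID_MASK	0xffu			/* assumed 8-bit ASID field */

int main(void)
{
	unsigned w = 2, e = 3;			 /* hypothetical way/entry pair */
	unsigned tlbidx = w | (e << PAGE_SHIFT); /* as in check_tlb_entry() */
	unsigned r0 = 0xd0000001;		 /* pretend read_dtlb_virtual()
						    result: VPN tag | ASID 1 */
	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);

	printf("tlbidx = %08x\n", tlbidx);	/* 00003002 */
	printf("vpn    = %08x\n", vpn);		/* d0003000 */
	printf("asid   = %u (%s)\n", r0 & ASID_MASK,
	       (r0 & ASID_MASK) == 1 ? "kernel" : "user");
	return 0;
}

With the option enabled (CONFIG_DEBUG_TLB_SANITY=y, under "Kernel hacking"), check_tlb_sanity() sweeps every auto-refill way/entry pair of both the DTLB and ITLB on each return to userspace, BUGs if any entry is insane and WARNs if any is merely suspicious.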
