author	Linus Torvalds <torvalds@linux-foundation.org>	2020-12-14 18:35:53 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-12-14 18:35:53 -0800
commit	edd7ab76847442e299af64a761febd180d71f98d (patch)
tree	91c3ee6ca27074c655bc1aaf04358f4e41df54d4 /arch/xtensa/include
parent	adb35e8dc98ba9bda99ff79ac6a05b8fcde2a762 (diff)
parent	68061c02bb295da4955f0d309b9459f0a7ba83dd (diff)
Merge tag 'core-mm-2020-12-14' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull kmap updates from Thomas Gleixner:
 "The new preemptible kmap_local() implementation:

  - Consolidate all kmap_atomic() internals into a generic implementation
    which builds the base for the kmap_local() API and make the
    kmap_atomic() interface wrappers which handle the disabling/enabling
    of preemption and pagefaults.

  - Switch the storage from per-CPU to per task and provide scheduler
    support for clearing mapping when scheduling out and restoring them
    when scheduling back in.

  - Merge the migrate_disable/enable() code, which is also part of the
    scheduler pull request. This was required to make the kmap_local()
    interface available which does not disable preemption when a mapping
    is established. It has to disable migration instead to guarantee that
    the virtual address of the mapped slot is the same across preemption.

  - Provide better debug facilities: guard pages and enforced utilization
    of the mapping mechanics on 64bit systems when the architecture
    allows it.

  - Provide the new kmap_local() API which can now be used to cleanup the
    kmap_atomic() usage sites all over the place. Most of the usage sites
    do not require the implicit disabling of preemption and pagefaults so
    the penalty on 64bit and 32bit non-highmem systems is removed and
    quite some of the code can be simplified. A wholesale conversion is
    not possible because some usage depends on the implicit side effects
    and some need to be cleaned up because they work around these side
    effects.

    The migrate disable side effect is only effective on highmem systems
    and when enforced debugging is enabled. On 64bit and 32bit non-highmem
    systems the overhead is completely avoided"

* tag 'core-mm-2020-12-14' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (33 commits)
  ARM: highmem: Fix cache_is_vivt() reference
  x86/crashdump/32: Simplify copy_oldmem_page()
  io-mapping: Provide iomap_local variant
  mm/highmem: Provide kmap_local*
  sched: highmem: Store local kmaps in task struct
  x86: Support kmap_local() forced debugging
  mm/highmem: Provide CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
  mm/highmem: Provide and use CONFIG_DEBUG_KMAP_LOCAL
  microblaze/mm/highmem: Add dropped #ifdef back
  xtensa/mm/highmem: Make generic kmap_atomic() work correctly
  mm/highmem: Take kmap_high_get() properly into account
  highmem: High implementation details and document API
  Documentation/io-mapping: Remove outdated blurb
  io-mapping: Cleanup atomic iomap
  mm/highmem: Remove the old kmap_atomic cruft
  highmem: Get rid of kmap_types.h
  xtensa/mm/highmem: Switch to generic kmap atomic
  sparc/mm/highmem: Switch to generic kmap atomic
  powerpc/mm/highmem: Switch to generic kmap atomic
  nds32/mm/highmem: Switch to generic kmap atomic
  ...
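For a sense of what a kmap_local() conversion at a typical usage site looks
like, here is a minimal sketch (the helper function and call site are
hypothetical; kmap_local_page() and kunmap_local() are the interfaces this
series introduces):

  #include <linux/highmem.h>

  /* Hypothetical helper: zero a range of a possibly-highmem page. */
  static void zero_page_range(struct page *page, size_t off, size_t len)
  {
  	/* Old style: kmap_atomic() implicitly disabled preemption and
  	 * pagefaults for the lifetime of the mapping:
  	 *   char *addr = kmap_atomic(page); ... kunmap_atomic(addr);
  	 *
  	 * New style: kmap_local_page() only disables migration (and only
  	 * on highmem systems), so the returned address stays valid
  	 * across preemption.
  	 */
  	char *addr = kmap_local_page(page);

  	memset(addr + off, 0, len);
  	kunmap_local(addr);
  }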
Diffstat (limited to 'arch/xtensa/include')
-rw-r--r--	arch/xtensa/include/asm/fixmap.h	59
-rw-r--r--	arch/xtensa/include/asm/highmem.h	15
2 files changed, 22 insertions(+), 52 deletions(-)
diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h
index a06ffb0c61c7..1c65dc1d3397 100644
--- a/arch/xtensa/include/asm/fixmap.h
+++ b/arch/xtensa/include/asm/fixmap.h
@@ -16,64 +16,23 @@
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
#include <linux/pgtable.h>
-#include <asm/kmap_types.h>
-#endif
+#include <asm/kmap_size.h>
-/*
- * Here we define all the compile-time 'special' virtual
- * addresses. The point is to have a constant address at
- * compile time, but to set the physical address only
- * in the boot process. We allocate these special addresses
- * from the start of the consistent memory region upwards.
- * Also this lets us do fail-safe vmalloc(), we
- * can guarantee that these special addresses and
- * vmalloc()-ed addresses never overlap.
- *
- * these 'compile-time allocated' memory buffers are
- * fixed-size 4k pages. (or larger if used with an increment
- * higher than 1) use fixmap_set(idx,phys) to associate
- * physical memory with fixmap indices.
- */
+/* The map slots for temporary mappings via kmap_atomic/local(). */
enum fixed_addresses {
-#ifdef CONFIG_HIGHMEM
- /* reserved pte's for temporary kernel mappings */
FIX_KMAP_BEGIN,
FIX_KMAP_END = FIX_KMAP_BEGIN +
- (KM_TYPE_NR * NR_CPUS * DCACHE_N_COLORS) - 1,
-#endif
+ (KM_MAX_IDX * NR_CPUS * DCACHE_N_COLORS) - 1,
__end_of_fixed_addresses
};
-#define FIXADDR_TOP (XCHAL_KSEG_CACHED_VADDR - PAGE_SIZE)
+#define FIXADDR_END (XCHAL_KSEG_CACHED_VADDR - PAGE_SIZE)
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
-#define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK)
+/* Enforce that FIXADDR_START is PMD aligned to handle cache aliasing */
+#define FIXADDR_START ((FIXADDR_END - FIXADDR_SIZE) & PMD_MASK)
+#define FIXADDR_TOP (FIXADDR_START + FIXADDR_SIZE - PAGE_SIZE)
-#define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT))
-#define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT)
-
-#ifndef __ASSEMBLY__
-/*
- * 'index to address' translation. If anyone tries to use the idx
- * directly without translation, we catch the bug with a NULL-deference
- * kernel oops. Illegal ranges of incoming indices are caught too.
- */
-static __always_inline unsigned long fix_to_virt(const unsigned int idx)
-{
- /* Check if this memory layout is broken because fixmap overlaps page
- * table.
- */
- BUILD_BUG_ON(FIXADDR_START <
- TLBTEMP_BASE_1 + TLBTEMP_SIZE);
- BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
- return __fix_to_virt(idx);
-}
-
-static inline unsigned long virt_to_fix(const unsigned long vaddr)
-{
- BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
- return __virt_to_fix(vaddr);
-}
-
-#endif
+#include <asm-generic/fixmap.h>
+#endif /* CONFIG_HIGHMEM */
#endif
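Why FIXADDR_TOP has to be recomputed from FIXADDR_START becomes clear from
the generic header that replaces the removed helpers: the deleted xtensa
__fix_to_virt() indexed upward from FIXADDR_START, while
<asm-generic/fixmap.h> indexes downward from FIXADDR_TOP. Roughly,
paraphrasing the generic header as it stood at the time:

  /* Paraphrased from include/asm-generic/fixmap.h: slot 0 sits at the
   * top of the fixmap area and higher indices grow downward, the
   * opposite direction of the removed xtensa variant above. */
  #define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
  #define __virt_to_fix(x)	((FIXADDR_TOP - ((x) & PAGE_MASK)) >> PAGE_SHIFT)

  static __always_inline unsigned long fix_to_virt(const unsigned int idx)
  {
  	BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
  	return __fix_to_virt(idx);
  }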
diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h
index eac503215f17..34b8b620e7f1 100644
--- a/arch/xtensa/include/asm/highmem.h
+++ b/arch/xtensa/include/asm/highmem.h
@@ -12,13 +12,13 @@
#ifndef _XTENSA_HIGHMEM_H
#define _XTENSA_HIGHMEM_H
+#ifdef CONFIG_HIGHMEM
#include <linux/wait.h>
#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
-#include <asm/kmap_types.h>
-#define PKMAP_BASE ((FIXADDR_START - \
+#define PKMAP_BASE ((FIXADDR_START - \
(LAST_PKMAP + 1) * PAGE_SIZE) & PMD_MASK)
#define LAST_PKMAP (PTRS_PER_PTE * DCACHE_N_COLORS)
#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
@@ -59,6 +59,13 @@ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
{
return pkmap_map_wait_arr + color;
}
+
+enum fixed_addresses kmap_local_map_idx(int type, unsigned long pfn);
+#define arch_kmap_local_map_idx kmap_local_map_idx
+
+enum fixed_addresses kmap_local_unmap_idx(int type, unsigned long addr);
+#define arch_kmap_local_unmap_idx kmap_local_unmap_idx
+
#endif
extern pte_t *pkmap_page_table;
@@ -68,6 +75,10 @@ static inline void flush_cache_kmaps(void)
flush_cache_all();
}
+#define arch_kmap_local_post_unmap(vaddr) \
+ local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE)
+
void kmap_init(void);
+#endif /* CONFIG_HIGHMEM */
#endif
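The kmap_local_map_idx()/kmap_local_unmap_idx() hooks declared above are
implemented in arch/xtensa/mm/highmem.c. A sketch of the color-aware slot
calculation, paraphrased from that file (assuming DCACHE_ALIAS() extracts
the cache color of an address):

  static inline enum fixed_addresses kmap_idx(int type, unsigned long color)
  {
  	int idx = (type + KM_MAX_IDX * smp_processor_id()) * DCACHE_N_COLORS;

  	/* The generic fixmap operates top down, so the color offset
  	 * has to be reversed as well. */
  	return idx + DCACHE_N_COLORS - 1 - color;
  }

  enum fixed_addresses kmap_local_map_idx(int type, unsigned long pfn)
  {
  	return kmap_idx(type, DCACHE_ALIAS(pfn << PAGE_SHIFT));
  }

  enum fixed_addresses kmap_local_unmap_idx(int type, unsigned long addr)
  {
  	return kmap_idx(type, DCACHE_ALIAS(addr));
  }

Picking the slot by cache color keeps the temporary virtual mapping aliased
to the same cache lines as the page's other mappings, which is what the
DCACHE_N_COLORS factor in FIX_KMAP_END above accounts for.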