Diffstat (limited to 'include/linux/prefetch.h')
-rw-r--r--	include/linux/prefetch.h	16
1 file changed, 10 insertions, 6 deletions
diff --git a/include/linux/prefetch.h b/include/linux/prefetch.h
index a3bfbdf63d32..b068e2e60939 100644
--- a/include/linux/prefetch.h
+++ b/include/linux/prefetch.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  *  Generic cache management functions. Everything is arch-specific,
  *  but this header exists to make sure the defines/functions can be
@@ -14,6 +15,7 @@
 #include <asm/processor.h>
 #include <asm/cache.h>
 
+struct page;
 /*
 	prefetch(x) attempts to pre-emptively get the memory pointed to
 	by address "x" into the CPU L1 cache.
@@ -23,11 +25,10 @@
 	prefetch() should be defined by the architecture, if not, the
 	#define below provides a no-op define.
 
-	There are 3 prefetch() macros:
+	There are 2 prefetch() macros:
 
 	prefetch(x)	- prefetches the cacheline at "x" for read
 	prefetchw(x)	- prefetches the cacheline at "x" for write
-	spin_lock_prefetch(x) - prefetches the spinlock *x for taking
 
 	there is also PREFETCH_STRIDE which is the architecture-preferred
 	"lookahead" size for prefetching streamed operations.
@@ -42,10 +43,6 @@
 #define prefetchw(x) __builtin_prefetch(x,1)
 #endif
 
-#ifndef ARCH_HAS_SPINLOCK_PREFETCH
-#define spin_lock_prefetch(x) prefetchw(x)
-#endif
-
 #ifndef PREFETCH_STRIDE
 #define PREFETCH_STRIDE (4*L1_CACHE_BYTES)
 #endif
@@ -61,4 +58,11 @@ static inline void prefetch_range(void *addr, size_t len)
 #endif
 }
 
+static inline void prefetch_page_address(struct page *page)
+{
+#if defined(WANT_PAGE_VIRTUAL) || defined(HASHED_PAGE_VIRTUAL)
+	prefetch(page);
+#endif
+}
+
 #endif
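
For context (not part of the patch): a minimal sketch of how the two remaining macros are typically used, overlapping the fetch of the next element with work on the current one. The struct item type and both functions below are hypothetical illustrations, not kernel code.

#include <linux/prefetch.h>

/* Hypothetical list node, for illustration only. */
struct item {
	struct item *next;
	unsigned long val;
};

/*
 * While processing the current node, start pulling the next one
 * into the L1 cache so the load overlaps with useful work.
 * Prefetching the NULL pointer at the end of the list is harmless:
 * prefetch instructions do not fault.
 */
static unsigned long sum_items(struct item *head)
{
	unsigned long sum = 0;
	struct item *p;

	for (p = head; p; p = p->next) {
		prefetch(p->next);	/* read-only access follows */
		sum += p->val;
	}
	return sum;
}

/*
 * For a read-modify-write pass, prefetchw() requests the cacheline
 * in a writable state up front, avoiding a second coherency
 * transaction when the store happens.
 */
static void bump_items(struct item *head)
{
	struct item *p;

	for (p = head; p; p = p->next) {
		prefetchw(p->next);
		p->val++;
	}
}

For streamed accesses, prefetch_range() from this header walks a buffer in PREFETCH_STRIDE-sized steps instead. The new prefetch_page_address() only does anything on configurations (WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL) where struct page carries a cached virtual address worth warming; elsewhere it compiles to a no-op.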