author		Linus Torvalds <torvalds@linux-foundation.org>	2020-01-23 11:37:19 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-01-23 11:37:19 -0800
commit		4703d9119972bf586d2cca76ec6438f819ffa30e (patch)
tree		dcfa2b44c5b613252b974b42471f5220ca4e81b6 /include
parent		34597c85be987cc731a840fa0c9bb969c92bd986 (diff)
parent		00ed452c210a0bc1ff3ee79e1ce6b199f00a0638 (diff)
Merge tag 'xarray-5.5' of git://git.infradead.org/users/willy/linux-dax
Pull XArray fixes from Matthew Wilcox:
 "Primarily bugfixes, mostly around handling index wrap-around
  correctly. A couple of doc fixes and adding missing APIs.

  I had an oops live on stage at linux.conf.au this year, and it turned
  out to be a bug in xas_find() which I can't prove isn't triggerable
  in the current codebase. Then in looking for the bug, I spotted two
  more bugs.

  The bots have had a few days to chew on this with no problems
  reported, and it passes the test-suite (which now has more tests to
  make sure these problems don't come back)"

* tag 'xarray-5.5' of git://git.infradead.org/users/willy/linux-dax:
  XArray: Add xa_for_each_range
  XArray: Fix xas_find returning too many entries
  XArray: Fix xa_find_after with multi-index entries
  XArray: Fix infinite loop with entry at ULONG_MAX
  XArray: Add wrappers for nested spinlocks
  XArray: Improve documentation of search marks
  XArray: Fix xas_pause at ULONG_MAX
Diffstat (limited to 'include')
-rw-r--r--	include/linux/xarray.h | 45 ++++++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 40 insertions(+), 5 deletions(-)
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 86eecbd98e84..f73e1775ded0 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -417,6 +417,36 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
}
/**
+ * xa_for_each_range() - Iterate over a portion of an XArray.
+ * @xa: XArray.
+ * @index: Index of @entry.
+ * @entry: Entry retrieved from array.
+ * @start: First index to retrieve from array.
+ * @last: Last index to retrieve from array.
+ *
+ * During the iteration, @entry will have the value of the entry stored
+ * in @xa at @index. You may modify @index during the iteration if you
+ * want to skip or reprocess indices. It is safe to modify the array
+ * during the iteration. At the end of the iteration, @entry will be set
+ * to NULL and @index will have a value less than or equal to @last.
+ *
+ * xa_for_each_range() is O(n.log(n)) while xas_for_each() is O(n). You have
+ * to handle your own locking with xas_for_each(), and if you have to unlock
+ * after each iteration, it will also end up being O(n.log(n)).
+ * xa_for_each_range() will spin if it hits a retry entry; if you intend to
+ * see retry entries, you should use the xas_for_each() iterator instead.
+ * The xas_for_each() iterator will expand into more inline code than
+ * xa_for_each_range().
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ */
+#define xa_for_each_range(xa, index, entry, start, last) \
+ for (index = start, \
+ entry = xa_find(xa, &index, last, XA_PRESENT); \
+ entry; \
+ entry = xa_find_after(xa, &index, last, XA_PRESENT))
+
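A minimal usage sketch of the new iterator (the array name my_xa and the
process() callback are hypothetical, not part of this patch):

	struct xarray my_xa;
	unsigned long index;
	void *entry;

	/* Visits each present entry whose index lies in [16, 31];
	 * the macro takes and releases the RCU lock internally. */
	xa_for_each_range(&my_xa, index, entry, 16, 31)
		process(index, entry);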
+/**
* xa_for_each_start() - Iterate over a portion of an XArray.
* @xa: XArray.
* @index: Index of @entry.
@@ -439,11 +469,8 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
*
* Context: Any context. Takes and releases the RCU lock.
*/
-#define xa_for_each_start(xa, index, entry, start) \
- for (index = start, \
- entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT); \
- entry; \
- entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT))
+#define xa_for_each_start(xa, index, entry, start) \
+ xa_for_each_range(xa, index, entry, start, ULONG_MAX)
/**
* xa_for_each() - Iterate over present entries in an XArray.
@@ -508,6 +535,14 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
spin_lock_irqsave(&(xa)->xa_lock, flags)
#define xa_unlock_irqrestore(xa, flags) \
spin_unlock_irqrestore(&(xa)->xa_lock, flags)
+#define xa_lock_nested(xa, subclass) \
+ spin_lock_nested(&(xa)->xa_lock, subclass)
+#define xa_lock_bh_nested(xa, subclass) \
+ spin_lock_bh_nested(&(xa)->xa_lock, subclass)
+#define xa_lock_irq_nested(xa, subclass) \
+ spin_lock_irq_nested(&(xa)->xa_lock, subclass)
+#define xa_lock_irqsave_nested(xa, flags, subclass) \
+ spin_lock_irqsave_nested(&(xa)->xa_lock, flags, subclass)
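A hedged sketch of where the nested variants matter: taking two XArray
locks in a fixed order, with the subclass telling lockdep the nesting is
intentional (xa_src and xa_dst are hypothetical arrays, not part of this
patch):

	struct xarray xa_src, xa_dst;

	/* Lock both arrays; SINGLE_DEPTH_NESTING (from lockdep.h)
	 * marks the second acquisition as deliberate nesting rather
	 * than a potential deadlock. */
	xa_lock(&xa_src);
	xa_lock_nested(&xa_dst, SINGLE_DEPTH_NESTING);
	/* ... move or copy entries between the two arrays ... */
	xa_unlock(&xa_dst);
	xa_unlock(&xa_src);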
/*
* Versions of the normal API which require the caller to hold the