Diffstat (limited to 'fs/btrfs/misc.h')
 -rw-r--r--  fs/btrfs/misc.h  172
 1 file changed, 138 insertions, 34 deletions
diff --git a/fs/btrfs/misc.h b/fs/btrfs/misc.h
index 6461ebc3a1c1..12c5a9d6564f 100644
--- a/fs/btrfs/misc.h
+++ b/fs/btrfs/misc.h
@@ -3,12 +3,78 @@
#ifndef BTRFS_MISC_H
#define BTRFS_MISC_H
+#include <linux/types.h>
+#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/wait.h>
-#include <asm/div64.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/math64.h>
#include <linux/rbtree.h>
+#include <linux/bio.h>
-#define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))
+/*
+ * Convenience macros to define a pointer with the __free(kfree) or
+ * __free(kvfree) cleanup attribute, initialized to NULL.
+ */
+#define AUTO_KFREE(name) *name __free(kfree) = NULL
+#define AUTO_KVFREE(name) *name __free(kvfree) = NULL
+
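
As a minimal usage sketch (the function, variable, and allocation below are illustrative, not part of this patch), a pointer declared with AUTO_KFREE() is freed automatically when it goes out of scope:

static int example_read_item(size_t len)
{
	u8 AUTO_KFREE(buf);	/* expands to: u8 *buf __free(kfree) = NULL; */

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... fill and use buf ... */

	return 0;	/* kfree(buf) runs automatically when buf leaves scope */
}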
+/*
+ * Enumerate bits using enum autoincrement. Define @name as the value of the n-th bit.
+ */
+#define ENUM_BIT(name) \
+ __ ## name ## _BIT, \
+ name = (1U << __ ## name ## _BIT), \
+ __ ## name ## _SEQ = __ ## name ## _BIT
+
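
For illustration (the flag names below are hypothetical), each ENUM_BIT() use defines the named constant as the next power of two, while the hidden helper enumerators keep the bit counter advancing by one:

enum {
	ENUM_BIT(EXAMPLE_FLAG_A),	/* == (1U << 0) */
	ENUM_BIT(EXAMPLE_FLAG_B),	/* == (1U << 1) */
	ENUM_BIT(EXAMPLE_FLAG_C),	/* == (1U << 2) */
};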
+static inline phys_addr_t bio_iter_phys(struct bio *bio, struct bvec_iter *iter)
+{
+ struct bio_vec bv = bio_iter_iovec(bio, *iter);
+
+ return bvec_phys(&bv);
+}
+
+/*
+ * Iterate a bio using the btrfs block size.
+ *
+ * This handles large folios and highmem.
+ *
+ * @paddr: Physical address of the current block at each iteration
+ * @bio: The bio to iterate over
+ * @iter: The bvec_iter (pointer) to use
+ * @blocksize: The block size to step by
+ *
+ * This requires every folio in the bio to cover at least one block.
+ */
+#define btrfs_bio_for_each_block(paddr, bio, iter, blocksize) \
+ for (; (iter)->bi_size && \
+ (paddr = bio_iter_phys((bio), (iter)), 1); \
+ bio_advance_iter_single((bio), (iter), (blocksize)))
+
+/* Initialize a bvec_iter to the size of the specified bio. */
+static inline struct bvec_iter init_bvec_iter_for_bio(struct bio *bio)
+{
+ struct bio_vec *bvec;
+ u32 bio_size = 0;
+ int i;
+
+ bio_for_each_bvec_all(bvec, bio, i)
+ bio_size += bvec->bv_len;
+
+ return (struct bvec_iter) {
+ .bi_sector = 0,
+ .bi_size = bio_size,
+ .bi_idx = 0,
+ .bi_bvec_done = 0,
+ };
+}
+
+#define btrfs_bio_for_each_block_all(paddr, bio, blocksize) \
+ for (struct bvec_iter iter = init_bvec_iter_for_bio(bio); \
+ (iter).bi_size && \
+ (paddr = bio_iter_phys((bio), &(iter)), 1); \
+ bio_advance_iter_single((bio), &(iter), (blocksize)))
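
A minimal sketch of how the two iterators might be used (the function name and the blocksize parameter are illustrative): btrfs_bio_for_each_block() advances a caller-provided bvec_iter, while btrfs_bio_for_each_block_all() declares its own iterator covering the full bio.

static void example_walk_blocks(struct bio *bio, u32 blocksize)
{
	struct bvec_iter pos = bio->bi_iter;
	phys_addr_t paddr;

	/* Walk the not-yet-processed part of the bio, one block at a time. */
	btrfs_bio_for_each_block(paddr, bio, &pos, blocksize) {
		/* paddr is the physical address of the current block. */
	}

	/* Walk the whole bio from the start, regardless of bi_iter progress. */
	btrfs_bio_for_each_block_all(paddr, bio, blocksize) {
		/* paddr again points at one block of @blocksize bytes. */
	}
}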
static inline void cond_wake_up(struct wait_queue_head *wq)
{
@@ -32,22 +98,10 @@ static inline void cond_wake_up_nomb(struct wait_queue_head *wq)
wake_up(wq);
}
-static inline u64 div_factor(u64 num, int factor)
+static inline u64 mult_perc(u64 num, u32 percent)
{
- if (factor == 10)
- return num;
- num *= factor;
- return div_u64(num, 10);
+ return div_u64(num * percent, 100);
}
-
-static inline u64 div_factor_fine(u64 num, int factor)
-{
- if (factor == 100)
- return num;
- num *= factor;
- return div_u64(num, 100);
-}
-
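
As a rough equivalence sketch (the values are chosen for illustration), call sites of the removed helpers map onto mult_perc() by expressing the factor as a percentage:

	u64 total = 1000;
	u64 a = mult_perc(total, 80);	/* 800, same result as the old div_factor(total, 8) */
	u64 b = mult_perc(total, 95);	/* 950, same result as the old div_factor_fine(total, 95) */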
/* Copy of is_power_of_two that is 64bit safe */
static inline bool is_power_of_two_u64(u64 n)
{
@@ -70,7 +124,7 @@ struct rb_simple_node {
u64 bytenr;
};
-static inline struct rb_node *rb_simple_search(struct rb_root *root, u64 bytenr)
+static inline struct rb_node *rb_simple_search(const struct rb_root *root, u64 bytenr)
{
struct rb_node *node = root->rb_node;
struct rb_simple_node *entry;
@@ -88,28 +142,78 @@ static inline struct rb_node *rb_simple_search(struct rb_root *root, u64 bytenr)
return NULL;
}
-static inline struct rb_node *rb_simple_insert(struct rb_root *root, u64 bytenr,
- struct rb_node *node)
+/*
+ * Search @root for an entry that starts at or comes after @bytenr.
+ *
+ * @root: the root to search.
+ * @bytenr: bytenr to search from.
+ *
+ * Return the rb_node that starts at or after @bytenr. If there is no entry at
+ * or after @bytenr, return NULL.
+ */
+static inline struct rb_node *rb_simple_search_first(const struct rb_root *root,
+ u64 bytenr)
{
- struct rb_node **p = &root->rb_node;
- struct rb_node *parent = NULL;
- struct rb_simple_node *entry;
+ struct rb_node *node = root->rb_node, *ret = NULL;
+ struct rb_simple_node *entry, *ret_entry = NULL;
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct rb_simple_node, rb_node);
+ while (node) {
+ entry = rb_entry(node, struct rb_simple_node, rb_node);
- if (bytenr < entry->bytenr)
- p = &(*p)->rb_left;
- else if (bytenr > entry->bytenr)
- p = &(*p)->rb_right;
- else
- return parent;
+ if (bytenr < entry->bytenr) {
+ if (!ret || entry->bytenr < ret_entry->bytenr) {
+ ret = node;
+ ret_entry = entry;
+ }
+
+ node = node->rb_left;
+ } else if (bytenr > entry->bytenr) {
+ node = node->rb_right;
+ } else {
+ return node;
+ }
}
- rb_link_node(node, parent, p);
- rb_insert_color(node, root);
- return NULL;
+ return ret;
+}
+
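
A minimal sketch of a caller walking all entries at or after a given bytenr (the function name and the pr_info() output are illustrative):

static void example_visit_from(const struct rb_root *root, u64 bytenr)
{
	struct rb_node *node = rb_simple_search_first(root, bytenr);

	/* Entries come back in increasing bytenr order, all >= @bytenr. */
	while (node) {
		struct rb_simple_node *entry;

		entry = rb_entry(node, struct rb_simple_node, rb_node);
		pr_info("entry at bytenr %llu\n", entry->bytenr);
		node = rb_next(node);
	}
}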
+static int rb_simple_node_bytenr_cmp(struct rb_node *new, const struct rb_node *existing)
+{
+ struct rb_simple_node *new_entry = rb_entry(new, struct rb_simple_node, rb_node);
+ struct rb_simple_node *existing_entry = rb_entry(existing, struct rb_simple_node, rb_node);
+
+ if (new_entry->bytenr < existing_entry->bytenr)
+ return -1;
+ else if (new_entry->bytenr > existing_entry->bytenr)
+ return 1;
+
+ return 0;
+}
+
+static inline struct rb_node *rb_simple_insert(struct rb_root *root,
+ struct rb_simple_node *simple_node)
+{
+ return rb_find_add(&simple_node->rb_node, root, rb_simple_node_bytenr_cmp);
+}
+
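
With the new signature a caller passes the embedded struct rb_simple_node directly; rb_find_add() returns the already-existing node on a bytenr collision and NULL once the new node has been linked in. A hypothetical caller:

struct example_item {
	struct rb_simple_node node;	/* node.bytenr is the tree key */
	/* ... caller-specific fields ... */
};

/* Returns true if @item was inserted, false if the bytenr already exists. */
static bool example_insert(struct rb_root *root, struct example_item *item)
{
	return rb_simple_insert(root, &item->node) == NULL;
}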
+static inline bool bitmap_test_range_all_set(const unsigned long *addr,
+ unsigned long start,
+ unsigned long nbits)
+{
+ unsigned long found_zero;
+
+ found_zero = find_next_zero_bit(addr, start + nbits, start);
+ return (found_zero == start + nbits);
+}
+
+static inline bool bitmap_test_range_all_zero(const unsigned long *addr,
+ unsigned long start,
+ unsigned long nbits)
+{
+ unsigned long found_set;
+
+ found_set = find_next_bit(addr, start + nbits, start);
+ return (found_set == start + nbits);
}
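
A short sketch of the two range helpers on a small local bitmap (the bitmap name and ranges are illustrative):

static void example_bitmap_ranges(void)
{
	DECLARE_BITMAP(state, 16);

	bitmap_zero(state, 16);
	bitmap_set(state, 4, 8);	/* bits 4..11 set, the rest clear */

	WARN_ON(!bitmap_test_range_all_set(state, 4, 8));	/* true: bits 4..11 all set */
	WARN_ON(!bitmap_test_range_all_zero(state, 12, 4));	/* true: bits 12..15 all clear */
	WARN_ON(bitmap_test_range_all_set(state, 0, 16));	/* false: bits 0..3 are clear */
}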
#endif