author     Kent Overstreet <kent.overstreet@linux.dev>    2023-07-06 22:47:42 -0400
committer  Kent Overstreet <kent.overstreet@linux.dev>    2023-10-22 17:10:06 -0400
commit     73bd774d28d2b2e6a05c31bf7afb9247e02a8e49 (patch)
tree       21ab80d4b966d92647ea178bc5707a8534e5d123 /fs/bcachefs
parent     236b68da5017b5336b332f941323a5bc450594b3 (diff)
bcachefs: Assorted sparse fixes
- endianness fixes
- mark some things static
- fix a few __percpu annotations
- fix silent enum conversions

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
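For context, the sketch below illustrates the kind of sparse idioms this patch applies: on-disk little-endian fields carry a bitwise type and must go through cpu_to_le64()/le64_to_cpu(), deliberate reinterpretations need a __force cast, and file-local helpers are marked static. This is a minimal userspace example, not code from the patch; the my_* names and the simplified macro definitions are stand-ins assumed for illustration (in-tree code gets the real annotations and helpers from the kernel's own headers).

/*
 * Illustrative only.  Build normally with gcc, or run sparse with
 * -D__CHECKER__ to see the bitwise/endianness warnings this commit is about.
 */
#include <stdint.h>
#include <stdio.h>

#ifdef __CHECKER__
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef uint64_t __bitwise my_le64;	/* stand-in for the kernel's __le64 */

/*
 * Stand-ins for cpu_to_le64()/le64_to_cpu().  For brevity they assume a
 * little-endian host; the kernel's helpers byte-swap on big-endian.
 */
static inline my_le64 my_cpu_to_le64(uint64_t v)  { return (__force my_le64) v; }
static inline uint64_t my_le64_to_cpu(my_le64 v)  { return (__force uint64_t) v; }

struct on_disk_thing {
	my_le64	seq;	/* stored little-endian on disk */
};

/* "mark some things static": file-local helpers get internal linkage */
static void set_seq(struct on_disk_thing *t, uint64_t seq)
{
	/* Writing "t->seq = seq;" directly would trigger a sparse warning. */
	t->seq = my_cpu_to_le64(seq);
}

static uint64_t get_seq(const struct on_disk_thing *t)
{
	return my_le64_to_cpu(t->seq);
}

int main(void)
{
	struct on_disk_thing t;

	set_seq(&t, 42);
	printf("%llu\n", (unsigned long long) get_seq(&t));
	return 0;
}

The explicit enum casts in the diff (for example "(enum btree_node_locked_type) type") follow the same idea: they keep the conversion, but make it visible instead of silent so sparse stops warning about mismatched enum types.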
Diffstat (limited to 'fs/bcachefs')
 fs/bcachefs/acl.c                   | 10
 fs/bcachefs/alloc_background.c      |  4
 fs/bcachefs/backpointers.c          | 12
 fs/bcachefs/bcachefs_format.h       |  8
 fs/bcachefs/bkey_methods.c          |  2
 fs/bcachefs/bset.c                  |  9
 fs/bcachefs/btree_cache.h           |  6
 fs/bcachefs/btree_gc.c              |  4
 fs/bcachefs/btree_io.c              |  4
 fs/bcachefs/btree_io.h              |  6
 fs/bcachefs/btree_iter.c            | 10
 fs/bcachefs/btree_iter.h            |  4
 fs/bcachefs/btree_locking.c         |  7
 fs/bcachefs/btree_locking.h         |  6
 fs/bcachefs/btree_types.h           |  5
 fs/bcachefs/btree_update_interior.c |  2
 fs/bcachefs/btree_update_leaf.c     |  2
 fs/bcachefs/buckets.c               |  6
 fs/bcachefs/checksum.c              |  8
 fs/bcachefs/counters.c              |  4
 fs/bcachefs/dirent.c                |  2
 fs/bcachefs/ec.c                    | 18
 fs/bcachefs/extents.c               |  8
 fs/bcachefs/extents.h               |  8
 fs/bcachefs/fs-io.c                 |  6
 fs/bcachefs/fsck.c                  |  8
 fs/bcachefs/io.c                    |  2
 fs/bcachefs/journal_reclaim.c       |  2
 fs/bcachefs/journal_sb.c            |  8
 fs/bcachefs/quota.c                 |  8
 fs/bcachefs/recovery.c              |  6
 fs/bcachefs/replicas.c              |  6
 fs/bcachefs/subvolume.c             |  6
 fs/bcachefs/super.c                 |  6
 fs/bcachefs/sysfs.c                 |  2
 fs/bcachefs/tests.c                 |  2
 fs/bcachefs/varint.c                | 11
 fs/bcachefs/xattr.c                 |  5
 38 files changed, 115 insertions(+), 118 deletions(-)
diff --git a/fs/bcachefs/acl.c b/fs/bcachefs/acl.c
index ce7a460fb308..b1a488860678 100644
--- a/fs/bcachefs/acl.c
+++ b/fs/bcachefs/acl.c
@@ -225,6 +225,7 @@ struct posix_acl *bch2_get_acl(struct mnt_idmap *idmap,
struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch_hash_info hash = bch2_hash_info_init(c, &inode->ei_inode);
+ struct xattr_search_key search = X_SEARCH(acl_to_xattr_type(type), "", 0);
struct btree_trans trans;
struct btree_iter iter = { NULL };
struct bkey_s_c_xattr xattr;
@@ -237,9 +238,7 @@ retry:
bch2_trans_begin(&trans);
ret = bch2_hash_lookup(&trans, &iter, bch2_xattr_hash_desc,
- &hash, inode_inum(inode),
- &X_SEARCH(acl_to_xattr_type(type), "", 0),
- 0);
+ &hash, inode_inum(inode), &search, 0);
if (ret) {
if (!bch2_err_matches(ret, ENOENT))
acl = ERR_PTR(ret);
@@ -364,6 +363,7 @@ int bch2_acl_chmod(struct btree_trans *trans, subvol_inum inum,
struct posix_acl **new_acl)
{
struct bch_hash_info hash_info = bch2_hash_info_init(trans->c, inode);
+ struct xattr_search_key search = X_SEARCH(KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS, "", 0);
struct btree_iter iter;
struct bkey_s_c_xattr xattr;
struct bkey_i_xattr *new;
@@ -372,9 +372,7 @@ int bch2_acl_chmod(struct btree_trans *trans, subvol_inum inum,
int ret;
ret = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc,
- &hash_info, inum,
- &X_SEARCH(KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS, "", 0),
- BTREE_ITER_INTENT);
+ &hash_info, inum, &search, BTREE_ITER_INTENT);
if (ret)
return bch2_err_matches(ret, ENOENT) ? 0 : ret;
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index 81e80f36af43..782086afde54 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -929,7 +929,7 @@ int bch2_trans_mark_alloc(struct btree_trans *trans,
* This synthesizes deleted extents for holes, similar to BTREE_ITER_SLOTS for
* extents style btrees, but works on non-extents btrees:
*/
-struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
+static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
{
struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
@@ -1000,7 +1000,7 @@ static bool next_bucket(struct bch_fs *c, struct bpos *bucket)
return ca != NULL;
}
-struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter, struct bkey *hole)
+static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter, struct bkey *hole)
{
struct bch_fs *c = iter->trans->c;
struct bkey_s_c k;
diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c
index 20a4c3f071bf..760c4cc16a50 100644
--- a/fs/bcachefs/backpointers.c
+++ b/fs/bcachefs/backpointers.c
@@ -590,10 +590,10 @@ static size_t btree_nodes_fit_in_ram(struct bch_fs *c)
return div_u64(mem_bytes >> 1, btree_bytes(c));
}
-int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
- unsigned btree_leaf_mask,
- unsigned btree_interior_mask,
- struct bbpos start, struct bbpos *end)
+static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
+ unsigned btree_leaf_mask,
+ unsigned btree_interior_mask,
+ struct bbpos start, struct bbpos *end)
{
struct btree_iter iter;
struct bkey_s_c k;
@@ -691,8 +691,8 @@ static struct bpos bucket_pos_to_bp_safe(const struct bch_fs *c,
: bucket;
}
-int bch2_get_alloc_in_memory_pos(struct btree_trans *trans,
- struct bpos start, struct bpos *end)
+static int bch2_get_alloc_in_memory_pos(struct btree_trans *trans,
+ struct bpos start, struct bpos *end)
{
struct btree_iter alloc_iter;
struct btree_iter bp_iter;
diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
index 4401d27675ed..49b86bfda76b 100644
--- a/fs/bcachefs/bcachefs_format.h
+++ b/fs/bcachefs/bcachefs_format.h
@@ -1371,19 +1371,19 @@ static inline bool data_type_is_hidden(enum bch_data_type type)
struct bch_replicas_entry_v0 {
__u8 data_type;
__u8 nr_devs;
- __u8 devs[];
+ __u8 devs[0];
} __packed;
struct bch_sb_field_replicas_v0 {
struct bch_sb_field field;
- struct bch_replicas_entry_v0 entries[];
+ struct bch_replicas_entry_v0 entries[0];
} __packed __aligned(8);
struct bch_replicas_entry {
__u8 data_type;
__u8 nr_devs;
__u8 nr_required;
- __u8 devs[];
+ __u8 devs[0];
} __packed;
#define replicas_entry_bytes(_i) \
@@ -1391,7 +1391,7 @@ struct bch_replicas_entry {
struct bch_sb_field_replicas {
struct bch_sb_field field;
- struct bch_replicas_entry entries[];
+ struct bch_replicas_entry entries[0];
} __packed __aligned(8);
/* BCH_SB_FIELD_quota: */
diff --git a/fs/bcachefs/bkey_methods.c b/fs/bcachefs/bkey_methods.c
index 59a4f4802ee9..985ea2daa886 100644
--- a/fs/bcachefs/bkey_methods.c
+++ b/fs/bcachefs/bkey_methods.c
@@ -480,7 +480,7 @@ void __bch2_bkey_compat(unsigned level, enum btree_id btree_id,
u->k.p.snapshot = write
? 0 : U32_MAX;
} else {
- u64 min_packed = f->field_offset[BKEY_FIELD_SNAPSHOT];
+ u64 min_packed = le64_to_cpu(f->field_offset[BKEY_FIELD_SNAPSHOT]);
u64 max_packed = min_packed +
~(~0ULL << f->bits_per_field[BKEY_FIELD_SNAPSHOT]);
diff --git a/fs/bcachefs/bset.c b/fs/bcachefs/bset.c
index 4d55011551e0..bcdf28f39b9c 100644
--- a/fs/bcachefs/bset.c
+++ b/fs/bcachefs/bset.c
@@ -599,11 +599,10 @@ static inline unsigned bkey_mantissa(const struct bkey_packed *k,
return (u16) v;
}
-__always_inline
-static inline void make_bfloat(struct btree *b, struct bset_tree *t,
- unsigned j,
- struct bkey_packed *min_key,
- struct bkey_packed *max_key)
+static __always_inline void make_bfloat(struct btree *b, struct bset_tree *t,
+ unsigned j,
+ struct bkey_packed *min_key,
+ struct bkey_packed *max_key)
{
struct bkey_float *f = bkey_float(b, t, j);
struct bkey_packed *m = tree_to_bkey(b, t, j);
diff --git a/fs/bcachefs/btree_cache.h b/fs/bcachefs/btree_cache.h
index 4c11975208b3..1e562b6efa62 100644
--- a/fs/bcachefs/btree_cache.h
+++ b/fs/bcachefs/btree_cache.h
@@ -45,7 +45,11 @@ static inline u64 btree_ptr_hash_val(const struct bkey_i *k)
case KEY_TYPE_btree_ptr:
return *((u64 *) bkey_i_to_btree_ptr_c(k)->v.start);
case KEY_TYPE_btree_ptr_v2:
- return bkey_i_to_btree_ptr_v2_c(k)->v.seq;
+ /*
+ * The cast/deref is only necessary to avoid sparse endianness
+ * warnings:
+ */
+ return *((u64 *) &bkey_i_to_btree_ptr_v2_c(k)->v.seq);
default:
return 0;
}
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 9018a2a15212..ac6c748e0f7c 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -1229,7 +1229,7 @@ static int bch2_gc_done(struct bch_fs *c,
for_each_member_device(ca, c, dev) {
struct bch_dev_usage *dst = ca->usage_base;
struct bch_dev_usage *src = (void *)
- bch2_acc_percpu_u64s((void *) ca->usage_gc,
+ bch2_acc_percpu_u64s((u64 __percpu *) ca->usage_gc,
dev_usage_u64s());
copy_dev_field(buckets_ec, "buckets_ec");
@@ -1245,7 +1245,7 @@ static int bch2_gc_done(struct bch_fs *c,
unsigned nr = fs_usage_u64s(c);
struct bch_fs_usage *dst = c->usage_base;
struct bch_fs_usage *src = (void *)
- bch2_acc_percpu_u64s((void *) c->usage_gc, nr);
+ bch2_acc_percpu_u64s((u64 __percpu *) c->usage_gc, nr);
copy_fs_field(hidden, "hidden");
copy_fs_field(btree, "btree");
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 4ece3f684ef0..a8197c500894 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -1281,7 +1281,7 @@ struct btree_node_read_all {
unsigned nr;
void *buf[BCH_REPLICAS_MAX];
struct bio *bio[BCH_REPLICAS_MAX];
- int err[BCH_REPLICAS_MAX];
+ blk_status_t err[BCH_REPLICAS_MAX];
};
static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
@@ -2230,7 +2230,7 @@ bool bch2_btree_flush_all_writes(struct bch_fs *c)
return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
}
-const char * const bch2_btree_write_types[] = {
+static const char * const bch2_btree_write_types[] = {
#define x(t, n) [n] = #t,
BCH_BTREE_WRITE_TYPES()
NULL
diff --git a/fs/bcachefs/btree_io.h b/fs/bcachefs/btree_io.h
index c43fb60b8c82..0cadf651e7cf 100644
--- a/fs/bcachefs/btree_io.h
+++ b/fs/bcachefs/btree_io.h
@@ -178,7 +178,7 @@ static inline void compat_bformat(unsigned level, enum btree_id btree_id,
f->field_offset[BKEY_FIELD_SNAPSHOT] = write
? 0
- : U32_MAX - max_packed;
+ : cpu_to_le64(U32_MAX - max_packed);
}
}
@@ -200,7 +200,7 @@ static inline void compat_btree_node(unsigned level, enum btree_id btree_id,
struct btree_node *bn)
{
if (version < bcachefs_metadata_version_inode_btree_change &&
- btree_node_type_is_extents(btree_id) &&
+ btree_id_is_extents(btree_id) &&
!bpos_eq(bn->min_key, POS_MIN) &&
write)
bn->min_key = bpos_nosnap_predecessor(bn->min_key);
@@ -217,7 +217,7 @@ static inline void compat_btree_node(unsigned level, enum btree_id btree_id,
bn->max_key.snapshot = U32_MAX;
if (version < bcachefs_metadata_version_inode_btree_change &&
- btree_node_type_is_extents(btree_id) &&
+ btree_id_is_extents(btree_id) &&
!bpos_eq(bn->min_key, POS_MIN) &&
!write)
bn->min_key = bpos_nosnap_successor(bn->min_key);
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index c7b20baa1fd6..d16331620ab9 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -1438,7 +1438,7 @@ void bch2_btree_path_to_text(struct printbuf *out, struct btree_path *path)
prt_newline(out);
}
-noinline __cold
+static noinline __cold
void __bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans,
bool nosort)
{
@@ -1458,7 +1458,7 @@ void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans)
__bch2_trans_paths_to_text(out, trans, false);
}
-noinline __cold
+static noinline __cold
void __bch2_dump_trans_paths_updates(struct btree_trans *trans, bool nosort)
{
struct printbuf buf = PRINTBUF;
@@ -1867,9 +1867,9 @@ static inline struct bkey_i *btree_trans_peek_updates(struct btree_iter *iter)
: NULL;
}
-struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bpos end_pos)
+static struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bpos end_pos)
{
struct bkey_i *k;
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index 9ef9527dda6b..63260f68bc67 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -283,7 +283,7 @@ static inline void bch2_trans_verify_not_in_restart(struct btree_trans *trans)
}
__always_inline
-static inline int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
+static int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
{
BUG_ON(err <= 0);
BUG_ON(!bch2_err_matches(-err, BCH_ERR_transaction_restart));
@@ -294,7 +294,7 @@ static inline int btree_trans_restart_nounlock(struct btree_trans *trans, int er
}
__always_inline
-static inline int btree_trans_restart(struct btree_trans *trans, int err)
+static int btree_trans_restart(struct btree_trans *trans, int err)
{
btree_trans_restart_nounlock(trans, err);
return -err;
diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
index e6fe2a987574..dfdf46eb3e6d 100644
--- a/fs/bcachefs/btree_locking.c
+++ b/fs/bcachefs/btree_locking.c
@@ -598,13 +598,6 @@ int __bch2_btree_path_relock(struct btree_trans *trans,
return 0;
}
-__flatten
-bool bch2_btree_path_upgrade_norestart(struct btree_trans *trans,
- struct btree_path *path, unsigned long trace_ip)
-{
- return btree_path_get_locks(trans, path, true);
-}
-
bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,
struct btree_path *path,
unsigned new_locks_want)
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index 3b537e451d2c..78daa494c914 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -93,7 +93,7 @@ static inline void mark_btree_node_locked(struct btree_trans *trans,
unsigned level,
enum six_lock_type type)
{
- mark_btree_node_locked_noreset(path, level, type);
+ mark_btree_node_locked_noreset(path, level, (enum btree_node_locked_type) type);
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
path->l[level].lock_taken_time = local_clock();
#endif
@@ -246,7 +246,7 @@ static inline bool btree_node_lock_increment(struct btree_trans *trans,
trans_for_each_path(trans, path)
if (&path->l[level].b->c == b &&
btree_node_locked_type(path, level) >= want) {
- six_lock_increment(&b->lock, want);
+ six_lock_increment(&b->lock, (enum six_lock_type) want);
return true;
}
@@ -266,7 +266,7 @@ static inline int btree_node_lock(struct btree_trans *trans,
EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
if (likely(six_trylock_type(&b->lock, type)) ||
- btree_node_lock_increment(trans, b, level, type) ||
+ btree_node_lock_increment(trans, b, level, (enum btree_node_locked_type) type) ||
!(ret = btree_node_lock_nopath(trans, b, type, btree_path_ip_allocated(path)))) {
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
path->l[b->level].lock_taken_time = local_clock();
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index fc8a3326451f..937f9c2b63ed 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -681,6 +681,11 @@ static inline bool btree_node_type_is_extents(enum btree_node_type type)
return (1U << type) & BTREE_ID_IS_EXTENTS;
}
+static inline bool btree_id_is_extents(enum btree_id btree)
+{
+ return btree_node_type_is_extents((enum btree_node_type) btree);
+}
+
#define BTREE_ID_HAS_SNAPSHOTS \
((1U << BTREE_ID_extents)| \
(1U << BTREE_ID_inodes)| \
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 490c41e8f8fe..5592feff79d1 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -2036,7 +2036,7 @@ out:
return ret;
}
-void async_btree_node_rewrite_work(struct work_struct *work)
+static void async_btree_node_rewrite_work(struct work_struct *work)
{
struct async_btree_rewrite *a =
container_of(work, struct async_btree_rewrite, work);
diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c
index ad058b9252e1..1474dca26dde 100644
--- a/fs/bcachefs/btree_update_leaf.c
+++ b/fs/bcachefs/btree_update_leaf.c
@@ -29,7 +29,7 @@
* bch2_btree_path_peek_slot() for a cached iterator might return a key in a
* different snapshot:
*/
-struct bkey_s_c bch2_btree_path_peek_slot_exact(struct btree_path *path, struct bkey *u)
+static struct bkey_s_c bch2_btree_path_peek_slot_exact(struct btree_path *path, struct bkey *u)
{
struct bkey_s_c k = bch2_btree_path_peek_slot(path, u);
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 48fdd5f96a3b..797ef5eceb3f 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -374,7 +374,7 @@ static inline int update_replicas(struct bch_fs *c, struct bkey_s_c k,
struct bch_replicas_entry *r, s64 sectors,
unsigned journal_seq, bool gc)
{
- struct bch_fs_usage __percpu *fs_usage;
+ struct bch_fs_usage *fs_usage;
int idx, ret = 0;
struct printbuf buf = PRINTBUF;
@@ -1143,7 +1143,7 @@ int bch2_mark_inode(struct btree_trans *trans,
unsigned flags)
{
struct bch_fs *c = trans->c;
- struct bch_fs_usage __percpu *fs_usage;
+ struct bch_fs_usage *fs_usage;
u64 journal_seq = trans->journal_res.seq;
if (flags & BTREE_TRIGGER_INSERT) {
@@ -1176,7 +1176,7 @@ int bch2_mark_reservation(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
- struct bch_fs_usage __percpu *fs_usage;
+ struct bch_fs_usage *fs_usage;
unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
s64 sectors = (s64) k.k->size;
diff --git a/fs/bcachefs/checksum.c b/fs/bcachefs/checksum.c
index 843e138862f6..f2a56d786024 100644
--- a/fs/bcachefs/checksum.c
+++ b/fs/bcachefs/checksum.c
@@ -360,7 +360,7 @@ struct bch_csum bch2_checksum_merge(unsigned type, struct bch_csum a,
state.type = type;
bch2_checksum_init(&state);
- state.seed = a.lo;
+ state.seed = (u64 __force) a.lo;
BUG_ON(!bch2_checksum_mergeable(type));
@@ -371,7 +371,7 @@ struct bch_csum bch2_checksum_merge(unsigned type, struct bch_csum a,
page_address(ZERO_PAGE(0)), b);
b_len -= b;
}
- a.lo = bch2_checksum_final(&state);
+ a.lo = (__le64 __force) bch2_checksum_final(&state);
a.lo ^= b.lo;
a.hi ^= b.hi;
return a;
@@ -597,7 +597,7 @@ int bch2_disable_encryption(struct bch_fs *c)
if (ret)
goto out;
- crypt->key.magic = BCH_KEY_MAGIC;
+ crypt->key.magic = cpu_to_le64(BCH_KEY_MAGIC);
crypt->key.key = key;
SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 0);
@@ -625,7 +625,7 @@ int bch2_enable_encryption(struct bch_fs *c, bool keyed)
if (ret)
goto err;
- key.magic = BCH_KEY_MAGIC;
+ key.magic = cpu_to_le64(BCH_KEY_MAGIC);
get_random_bytes(&key.key, sizeof(key.key));
if (keyed) {
diff --git a/fs/bcachefs/counters.c b/fs/bcachefs/counters.c
index e5587bc5a2b7..442a9b806a3c 100644
--- a/fs/bcachefs/counters.c
+++ b/fs/bcachefs/counters.c
@@ -5,7 +5,7 @@
/* BCH_SB_FIELD_counters */
-const char * const bch2_counter_names[] = {
+static const char * const bch2_counter_names[] = {
#define x(t, n, ...) (#t),
BCH_PERSISTENT_COUNTERS()
#undef x
@@ -27,7 +27,7 @@ static int bch2_sb_counters_validate(struct bch_sb *sb,
return 0;
};
-void bch2_sb_counters_to_text(struct printbuf *out, struct bch_sb *sb,
+static void bch2_sb_counters_to_text(struct printbuf *out, struct bch_sb *sb,
struct bch_sb_field *f)
{
struct bch_sb_field_counters *ctrs = field_to_type(f, counters);
diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c
index 610dd7425fb4..ef3f1f9b7e8d 100644
--- a/fs/bcachefs/dirent.c
+++ b/fs/bcachefs/dirent.c
@@ -219,7 +219,7 @@ int bch2_dirent_read_target(struct btree_trans *trans, subvol_inum dir,
int ret = 0;
if (d.v->d_type == DT_SUBVOL &&
- d.v->d_parent_subvol != dir.subvol)
+ le32_to_cpu(d.v->d_parent_subvol) != dir.subvol)
return 1;
if (likely(d.v->d_type != DT_SUBVOL)) {
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index 8d091c4a0173..e0d49fe49310 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -385,7 +385,7 @@ static void ec_block_endio(struct bio *bio)
}
static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
- unsigned rw, unsigned idx, struct closure *cl)
+ blk_opf_t opf, unsigned idx, struct closure *cl)
{
struct bch_stripe *v = &buf->key.v;
unsigned offset = 0, bytes = buf->size << 9;
@@ -394,6 +394,7 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
enum bch_data_type data_type = idx < buf->key.v.nr_blocks - buf->key.v.nr_redundant
? BCH_DATA_user
: BCH_DATA_parity;
+ int rw = op_is_write(opf);
if (ptr_stale(ca, ptr)) {
bch_err_ratelimited(c,
@@ -419,7 +420,7 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
ec_bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev,
nr_iovecs,
- rw,
+ opf,
GFP_KERNEL,
&c->ec_bioset),
struct ec_bio, bio);
@@ -1380,11 +1381,12 @@ void bch2_ec_stripe_head_put(struct bch_fs *c, struct ec_stripe_head *h)
mutex_unlock(&h->lock);
}
-struct ec_stripe_head *__bch2_ec_stripe_head_get(struct btree_trans *trans,
- unsigned target,
- unsigned algo,
- unsigned redundancy,
- enum bch_watermark watermark)
+static struct ec_stripe_head *
+__bch2_ec_stripe_head_get(struct btree_trans *trans,
+ unsigned target,
+ unsigned algo,
+ unsigned redundancy,
+ enum bch_watermark watermark)
{
struct bch_fs *c = trans->c;
struct ec_stripe_head *h;
@@ -1570,7 +1572,7 @@ static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stri
}
BUG_ON(h->s->existing_stripe.size != h->blocksize);
- BUG_ON(h->s->existing_stripe.size != h->s->existing_stripe.key.v.sectors);
+ BUG_ON(h->s->existing_stripe.size != le16_to_cpu(h->s->existing_stripe.key.v.sectors));
/*
* Free buckets we initially allocated - they might conflict with
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index 753a846eaf81..4e89a8be5cb7 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -216,7 +216,7 @@ void bch2_btree_ptr_v2_compat(enum btree_id btree_id, unsigned version,
compat_bpos(0, btree_id, version, big_endian, write, &bp.v->min_key);
if (version < bcachefs_metadata_version_inode_btree_change &&
- btree_node_type_is_extents(btree_id) &&
+ btree_id_is_extents(btree_id) &&
!bkey_eq(bp.v->min_key, POS_MIN))
bp.v->min_key = write
? bpos_nosnap_predecessor(bp.v->min_key)
@@ -514,13 +514,13 @@ static void bch2_extent_crc_pack(union bch_extent_crc *dst,
switch (type) {
case BCH_EXTENT_ENTRY_crc32:
set_common_fields(dst->crc32, src);
- dst->crc32.csum = *((__le32 *) &src.csum.lo);
+ dst->crc32.csum = (u32 __force) *((__le32 *) &src.csum.lo);
break;
case BCH_EXTENT_ENTRY_crc64:
set_common_fields(dst->crc64, src);
dst->crc64.nonce = src.nonce;
- dst->crc64.csum_lo = src.csum.lo;
- dst->crc64.csum_hi = *((__le16 *) &src.csum.hi);
+ dst->crc64.csum_lo = (u64 __force) src.csum.lo;
+ dst->crc64.csum_hi = (u64 __force) *((__le16 *) &src.csum.hi);
break;
case BCH_EXTENT_ENTRY_crc128:
set_common_fields(dst->crc128, src);
diff --git a/fs/bcachefs/extents.h b/fs/bcachefs/extents.h
index c573a40d366a..f6411d63f298 100644
--- a/fs/bcachefs/extents.h
+++ b/fs/bcachefs/extents.h
@@ -154,11 +154,7 @@ bch2_extent_crc_unpack(const struct bkey *k, const union bch_extent_crc *crc)
common_fields(crc->crc32),
};
- *((__le32 *) &ret.csum.lo) = crc->crc32.csum;
-
- memcpy(&ret.csum.lo, &crc->crc32.csum,
- sizeof(crc->crc32.csum));
-
+ *((__le32 *) &ret.csum.lo) = (__le32 __force) crc->crc32.csum;
return ret;
}
case BCH_EXTENT_ENTRY_crc64: {
@@ -168,7 +164,7 @@ bch2_extent_crc_unpack(const struct bkey *k, const union bch_extent_crc *crc)
.csum.lo = (__force __le64) crc->crc64.csum_lo,
};
- *((__le16 *) &ret.csum.hi) = crc->crc64.csum_hi;
+ *((__le16 *) &ret.csum.hi) = (__le16 __force) crc->crc64.csum_hi;
return ret;
}
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index 103f426c88e8..513ffb5d968b 100644
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -458,7 +458,7 @@ enum bch_folio_sector_state {
#undef x
};
-const char * const bch2_folio_sector_states[] = {
+static const char * const bch2_folio_sector_states[] = {
#define x(n) #n,
BCH_FOLIO_SECTOR_STATE()
#undef x
@@ -997,7 +997,7 @@ vm_fault_t bch2_page_fault(struct vm_fault *vmf)
struct address_space *mapping = file->f_mapping;
struct address_space *fdm = faults_disabled_mapping();
struct bch_inode_info *inode = file_bch_inode(file);
- int ret;
+ vm_fault_t ret;
if (fdm == mapping)
return VM_FAULT_SIGBUS;
@@ -1039,7 +1039,7 @@ vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
struct bch2_folio_reservation res;
unsigned len;
loff_t isize;
- int ret;
+ vm_fault_t ret;
bch2_folio_reservation_init(c, inode, &res);
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index 674018a58de5..98fde0bf6edc 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -1696,8 +1696,8 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
goto err;
if (fsck_err_on(ret, c,
- "dirent points to missing subvolume %llu",
- le64_to_cpu(d.v->d_child_subvol))) {
+ "dirent points to missing subvolume %u",
+ le32_to_cpu(d.v->d_child_subvol))) {
ret = __remove_dirent(trans, d.k->p);
goto err;
}
@@ -2238,7 +2238,7 @@ static int check_nlinks_find_hardlinks(struct bch_fs *c,
* Backpointer and directory structure checks are sufficient for
* directories, since they can't have hardlinks:
*/
- if (S_ISDIR(le16_to_cpu(u.bi_mode)))
+ if (S_ISDIR(u.bi_mode))
continue;
if (!u.bi_nlink)
@@ -2324,7 +2324,7 @@ static int check_nlinks_update_inode(struct btree_trans *trans, struct btree_ite
BUG_ON(bch2_inode_unpack(k, &u));
- if (S_ISDIR(le16_to_cpu(u.bi_mode)))
+ if (S_ISDIR(u.bi_mode))
return 0;
if (!u.bi_nlink)
diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c
index 7db94a8cb7ff..33762e4a0f05 100644
--- a/fs/bcachefs/io.c
+++ b/fs/bcachefs/io.c
@@ -1878,7 +1878,7 @@ err:
op->end_io(op);
}
-const char * const bch2_write_flags[] = {
+static const char * const bch2_write_flags[] = {
#define x(f) #f,
BCH_WRITE_FLAGS()
#undef x
diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c
index 5174b9497721..72486f1f8a8e 100644
--- a/fs/bcachefs/journal_reclaim.c
+++ b/fs/bcachefs/journal_reclaim.c
@@ -361,7 +361,7 @@ void bch2_journal_pin_drop(struct journal *j,
spin_unlock(&j->lock);
}
-enum journal_pin_type journal_pin_type(journal_pin_flush_fn fn)
+static enum journal_pin_type journal_pin_type(journal_pin_flush_fn fn)
{
if (fn == bch2_btree_node_flush0 ||
fn == bch2_btree_node_flush1)
diff --git a/fs/bcachefs/journal_sb.c b/fs/bcachefs/journal_sb.c
index fcefbbe7eda8..cc41bff86d6b 100644
--- a/fs/bcachefs/journal_sb.c
+++ b/fs/bcachefs/journal_sb.c
@@ -201,16 +201,16 @@ int bch2_journal_buckets_to_sb(struct bch_fs *c, struct bch_dev *ca,
bch2_sb_field_delete(&ca->disk_sb, BCH_SB_FIELD_journal);
- j->d[dst].start = le64_to_cpu(buckets[0]);
- j->d[dst].nr = le64_to_cpu(1);
+ j->d[dst].start = cpu_to_le64(buckets[0]);
+ j->d[dst].nr = cpu_to_le64(1);
for (i = 1; i < nr; i++) {
if (buckets[i] == buckets[i - 1] + 1) {
le64_add_cpu(&j->d[dst].nr, 1);
} else {
dst++;
- j->d[dst].start = le64_to_cpu(buckets[i]);
- j->d[dst].nr = le64_to_cpu(1);
+ j->d[dst].start = cpu_to_le64(buckets[i]);
+ j->d[dst].nr = cpu_to_le64(1);
}
}
diff --git a/fs/bcachefs/quota.c b/fs/bcachefs/quota.c
index 7e1f1828ab20..1decb7191da2 100644
--- a/fs/bcachefs/quota.c
+++ b/fs/bcachefs/quota.c
@@ -480,13 +480,13 @@ static int __bch2_quota_set(struct bch_fs *c, struct bkey_s_c k,
}
if (qdq && qdq->d_fieldmask & QC_SPC_TIMER)
- mq->c[Q_SPC].timer = cpu_to_le64(qdq->d_spc_timer);
+ mq->c[Q_SPC].timer = qdq->d_spc_timer;
if (qdq && qdq->d_fieldmask & QC_SPC_WARNS)
- mq->c[Q_SPC].warns = cpu_to_le64(qdq->d_spc_warns);
+ mq->c[Q_SPC].warns = qdq->d_spc_warns;
if (qdq && qdq->d_fieldmask & QC_INO_TIMER)
- mq->c[Q_INO].timer = cpu_to_le64(qdq->d_ino_timer);
+ mq->c[Q_INO].timer = qdq->d_ino_timer;
if (qdq && qdq->d_fieldmask & QC_INO_WARNS)
- mq->c[Q_INO].warns = cpu_to_le64(qdq->d_ino_warns);
+ mq->c[Q_INO].warns = qdq->d_ino_warns;
mutex_unlock(&q->lock);
}
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index 268fae9e7bf9..9b49a6bc6702 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -308,7 +308,7 @@ static void bch2_journal_iter_advance(struct journal_iter *iter)
}
}
-struct bkey_s_c bch2_journal_iter_peek(struct journal_iter *iter)
+static struct bkey_s_c bch2_journal_iter_peek(struct journal_iter *iter)
{
struct journal_key *k = iter->keys->d + iter->idx;
@@ -1042,7 +1042,7 @@ static int bch2_fs_initialize_subvolumes(struct bch_fs *c)
root_snapshot.k.p.offset = U32_MAX;
root_snapshot.v.flags = 0;
root_snapshot.v.parent = 0;
- root_snapshot.v.subvol = BCACHEFS_ROOT_SUBVOL;
+ root_snapshot.v.subvol = cpu_to_le32(BCACHEFS_ROOT_SUBVOL);
root_snapshot.v.tree = cpu_to_le32(1);
SET_BCH_SNAPSHOT_SUBVOL(&root_snapshot.v, true);
@@ -1468,7 +1468,7 @@ use_clean:
if (!(c->sb.compat & (1ULL << BCH_COMPAT_extents_above_btree_updates_done)) ||
!(c->sb.compat & (1ULL << BCH_COMPAT_bformat_overflow_done)) ||
- le16_to_cpu(c->sb.version_min) < bcachefs_metadata_version_btree_ptr_sectors_written) {
+ c->sb.version_min < bcachefs_metadata_version_btree_ptr_sectors_written) {
struct bch_move_stats stats;
bch2_move_stats_init(&stats, "recovery");
diff --git a/fs/bcachefs/replicas.c b/fs/bcachefs/replicas.c
index 76efbfce7683..d4c1d43e8c41 100644
--- a/fs/bcachefs/replicas.c
+++ b/fs/bcachefs/replicas.c
@@ -36,8 +36,8 @@ static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
eytzinger0_sort(r->entries, r->nr, r->entry_size, memcmp, NULL);
}
-void bch2_replicas_entry_v0_to_text(struct printbuf *out,
- struct bch_replicas_entry_v0 *e)
+static void bch2_replicas_entry_v0_to_text(struct printbuf *out,
+ struct bch_replicas_entry_v0 *e)
{
unsigned i;
@@ -272,7 +272,7 @@ static void __replicas_table_update_pcpu(struct bch_fs_usage __percpu *dst_p,
{
unsigned src_nr = sizeof(struct bch_fs_usage) / sizeof(u64) + src_r->nr;
struct bch_fs_usage *dst, *src = (void *)
- bch2_acc_percpu_u64s((void *) src_p, src_nr);
+ bch2_acc_percpu_u64s((u64 __percpu *) src_p, src_nr);
preempt_disable();
dst = this_cpu_ptr(dst_p);
diff --git a/fs/bcachefs/subvolume.c b/fs/bcachefs/subvolume.c
index 89c7c83c50e8..341c0d1b81d3 100644
--- a/fs/bcachefs/subvolume.c
+++ b/fs/bcachefs/subvolume.c
@@ -825,7 +825,7 @@ static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
goto err;
if (s.v->children[0]) {
- s_t->v.root_snapshot = cpu_to_le32(s.v->children[0]);
+ s_t->v.root_snapshot = s.v->children[0];
} else {
s_t->k.type = KEY_TYPE_deleted;
set_bkey_val_u64s(&s_t->k, 0);
@@ -1328,7 +1328,7 @@ static int bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)
__bch2_subvolume_delete(trans, subvolid));
}
-void bch2_subvolume_wait_for_pagecache_and_delete(struct work_struct *work)
+static void bch2_subvolume_wait_for_pagecache_and_delete(struct work_struct *work)
{
struct bch_fs *c = container_of(work, struct bch_fs,
snapshot_wait_for_pagecache_and_delete_work);
@@ -1366,7 +1366,7 @@ struct subvolume_unlink_hook {
u32 subvol;
};
-int bch2_subvolume_wait_for_pagecache_and_delete_hook(struct btree_trans *trans,
+static int bch2_subvolume_wait_for_pagecache_and_delete_hook(struct btree_trans *trans,
struct btree_trans_commit_hook *_h)
{
struct subvolume_unlink_hook *h = container_of(_h, struct subvolume_unlink_hook, h);
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 7ecbc23af1a1..061a1518f28c 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -754,11 +754,11 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
goto err;
/* Compat: */
- if (sb->version <= bcachefs_metadata_version_inode_v2 &&
+ if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_inode_v2 &&
!BCH_SB_JOURNAL_FLUSH_DELAY(sb))
SET_BCH_SB_JOURNAL_FLUSH_DELAY(sb, 1000);
- if (sb->version <= bcachefs_metadata_version_inode_v2 &&
+ if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_inode_v2 &&
!BCH_SB_JOURNAL_RECLAIM_DELAY(sb))
SET_BCH_SB_JOURNAL_RECLAIM_DELAY(sb, 100);
@@ -1999,7 +1999,7 @@ err:
BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM
-unsigned bch2_metadata_version = bcachefs_metadata_version_current;
+static unsigned bch2_metadata_version = bcachefs_metadata_version_current;
module_param_named(version, bch2_metadata_version, uint, 0400);
module_exit(bcachefs_exit);
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index 364cbcd2654e..ef02e346e334 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -202,7 +202,7 @@ read_attribute(nocow_lock_table);
#ifdef BCH_WRITE_REF_DEBUG
read_attribute(write_refs);
-const char * const bch2_write_refs[] = {
+static const char * const bch2_write_refs[] = {
#define x(n) #n,
BCH_WRITE_REFS()
#undef x
diff --git a/fs/bcachefs/tests.c b/fs/bcachefs/tests.c
index 50d69a5634bd..cef23d2ccc5f 100644
--- a/fs/bcachefs/tests.c
+++ b/fs/bcachefs/tests.c
@@ -444,7 +444,7 @@ static int test_peek_end_extents(struct bch_fs *c, u64 nr)
/* extent unit tests */
-u64 test_version;
+static u64 test_version;
static int insert_test_extent(struct bch_fs *c,
u64 start, u64 end)
diff --git a/fs/bcachefs/varint.c b/fs/bcachefs/varint.c
index 5143b603bf67..ef030fc02448 100644
--- a/fs/bcachefs/varint.c
+++ b/fs/bcachefs/varint.c
@@ -22,12 +22,13 @@ int bch2_varint_encode(u8 *out, u64 v)
{
unsigned bits = fls64(v|1);
unsigned bytes = DIV_ROUND_UP(bits, 7);
+ __le64 v_le;
if (likely(bytes < 9)) {
v <<= bytes;
v |= ~(~0 << (bytes - 1));
- v = cpu_to_le64(v);
- memcpy(out, &v, bytes);
+ v_le = cpu_to_le64(v);
+ memcpy(out, &v_le, bytes);
} else {
*out++ = 255;
bytes = 9;
@@ -57,9 +58,9 @@ int bch2_varint_decode(const u8 *in, const u8 *end, u64 *out)
return -1;
if (likely(bytes < 9)) {
- v = 0;
- memcpy(&v, in, bytes);
- v = le64_to_cpu(v);
+ __le64 v_le = 0;
+ memcpy(&v_le, in, bytes);
+ v = le64_to_cpu(v_le);
v >>= bytes;
} else {
v = get_unaligned_le64(++in);
diff --git a/fs/bcachefs/xattr.c b/fs/bcachefs/xattr.c
index 05c65d94c00f..f47a085d1434 100644
--- a/fs/bcachefs/xattr.c
+++ b/fs/bcachefs/xattr.c
@@ -135,15 +135,14 @@ static int bch2_xattr_get_trans(struct btree_trans *trans, struct bch_inode_info
const char *name, void *buffer, size_t size, int type)
{
struct bch_hash_info hash = bch2_hash_info_init(trans->c, &inode->ei_inode);
+ struct xattr_search_key search = X_SEARCH(type, name, strlen(name));
struct btree_iter iter;
struct bkey_s_c_xattr xattr;
struct bkey_s_c k;
int ret;
ret = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc, &hash,
- inode_inum(inode),
- &X_SEARCH(type, name, strlen(name)),
- 0);
+ inode_inum(inode), &search, 0);
if (ret)
goto err1;