author    Xiubo Li <xiubli@redhat.com>    2022-02-15 20:23:14 +0800
committer Ilya Dryomov <idryomov@gmail.com>    2022-03-01 18:26:37 +0100
commit    ab58a5a1c0487b67f7409f39d3c8593d416d4e7f (patch)
tree      5f31cd00559f75dc0fc46dc6ffcc7fe84c248e4b /fs/ceph/snap.c
parent    6ddf5f165f13ab623d04aee2a473d35818255199 (diff)
ceph: move to a dedicated slabcache for ceph_cap_snap
There could be a huge number of capsnaps around at any given time. On x86_64 the structure is 248 bytes, which kzalloc rounds up to a 256-byte allocation. Move it to a dedicated slabcache to save those 8 bytes per object.

[ jlayton: use kmem_cache_zalloc ]

Signed-off-by: Xiubo Li <xiubli@redhat.com>
Signed-off-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
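This snap.c-only diff uses a cache named ceph_cap_snap_cachep but does not show where it is created; that happens elsewhere in the series. As a hedged sketch of the dedicated-slabcache pattern being adopted (only the ceph_cap_snap_cachep name comes from the diff below; the init/exit helpers are illustrative):

/*
 * Sketch only: how a dedicated slab cache for struct ceph_cap_snap is
 * typically created and destroyed. Not the actual ceph setup code.
 */
#include <linux/slab.h>
#include "super.h"                      /* struct ceph_cap_snap definition */

struct kmem_cache *ceph_cap_snap_cachep;

int capsnap_cache_init(void)            /* hypothetical init hook */
{
        /* Cache sized to the struct itself instead of the next kmalloc bucket. */
        ceph_cap_snap_cachep = KMEM_CACHE(ceph_cap_snap, 0);
        return ceph_cap_snap_cachep ? 0 : -ENOMEM;
}

void capsnap_cache_exit(void)           /* hypothetical teardown hook */
{
        kmem_cache_destroy(ceph_cap_snap_cachep);
}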
Diffstat (limited to 'fs/ceph/snap.c')
-rw-r--r--    fs/ceph/snap.c    5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index b41e6724c591..bc5ec72d958c 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -482,7 +482,7 @@ static void ceph_queue_cap_snap(struct ceph_inode_info *ci)
        struct ceph_buffer *old_blob = NULL;
        int used, dirty;

-       capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
+       capsnap = kmem_cache_zalloc(ceph_cap_snap_cachep, GFP_NOFS);
        if (!capsnap) {
                pr_err("ENOMEM allocating ceph_cap_snap on %p\n", inode);
                return;
@@ -603,7 +603,8 @@ update_snapc:
        spin_unlock(&ci->i_ceph_lock);
        ceph_buffer_put(old_blob);
-       kfree(capsnap);
+       if (capsnap)
+               kmem_cache_free(ceph_cap_snap_cachep, capsnap);
        ceph_put_snap_context(old_snapc);
 }
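A note on the second hunk: kfree(NULL) is a documented no-op, but kmem_cache_free() is not guaranteed to accept a NULL pointer, so an explicit if (capsnap) check is needed once the free moves to the dedicated cache. A minimal sketch of the resulting allocate/maybe-free pattern, not the actual ceph code; consume_capsnap() is a hypothetical stand-in for the path that takes ownership of the capsnap:

/*
 * Sketch of the alloc/maybe-free pattern after this change. When ownership
 * is handed off, the local pointer is cleared so the cleanup path skips it.
 */
static void example_queue_cap_snap(void)
{
        struct ceph_cap_snap *capsnap;

        capsnap = kmem_cache_zalloc(ceph_cap_snap_cachep, GFP_NOFS);    /* zeroed object */
        if (!capsnap)
                return;

        if (consume_capsnap(capsnap))           /* hypothetical: ownership handed off */
                capsnap = NULL;

        /* Unlike kfree(), kmem_cache_free() should not be passed NULL. */
        if (capsnap)
                kmem_cache_free(ceph_cap_snap_cachep, capsnap);
}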