author     Byongho Lee <bhlee.kernel@gmail.com>            2018-01-31 16:15:36 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-01-31 17:18:35 -0800
commit     692ae74aaf226a557d88d5412a1764c09e63a193 (patch)
tree       1cc9dd8a77db15cbc5981a4e6d6f332bc6f10d7a /mm/slab_common.c
parent     d984187e3a1ad7d12447a7ab2c43ce3717a2b5b3 (diff)
mm/slab_common.c: make calculate_alignment() static
The calculate_alignment() function is only used inside slab_common.c, so make it static and let the compiler do more optimizations.

After this patch there's a small improvement in text and data size.

  $ gcc --version
  gcc (GCC) 7.2.1 20171128

Before:
   text    data     bss      dec    hex filename
9890457 3828702 1212364 14931523 e3d643 vmlinux

After:
   text    data     bss      dec    hex filename
9890437 3828670 1212364 14931471 e3d60f vmlinux

Also fix a style problem reported by checkpatch:

  WARNING: Missing a blank line after declarations
  #53: FILE: mm/slab_common.c:286:
  +	unsigned long ralign = cache_line_size();
  +	while (size <= ralign / 2)

Link: http://lkml.kernel.org/r/20171210080132.406-1-bhlee.kernel@gmail.com
Signed-off-by: Byongho Lee <bhlee.kernel@gmail.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
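For context only (not part of the patch), below is a rough userspace sketch of the alignment logic being moved. cache_line_size(), ARCH_SLAB_MINALIGN, SLAB_HWCACHE_ALIGN, ALIGN() and max() are kernel symbols, so the sketch substitutes assumed values (a 64-byte cache line, 8-byte minimum alignment) and a plain macro for ALIGN(); only the halving loop and the final pointer-size rounding mirror the function shown in the diff.

/*
 * Standalone sketch, not kernel code: the constants below are assumptions
 * chosen for illustration (64-byte cache line, 8-byte minimum alignment).
 */
#include <stdio.h>

#define SLAB_HWCACHE_ALIGN	0x2000UL	/* request cache-line alignment */
#define ARCH_SLAB_MINALIGN	8UL		/* assumed arch minimum */
#define CACHE_LINE_SIZE		64UL		/* stand-in for cache_line_size() */
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

static unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
{
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned long ralign = CACHE_LINE_SIZE;

		/* Halve the alignment while the object is small enough that
		 * two or more would still fit in one aligned slot. */
		while (size <= ralign / 2)
			ralign /= 2;
		if (ralign > align)
			align = ralign;	/* take the larger of the two, i.e. max(align, ralign) */
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	/* Round up to a multiple of the pointer size. */
	return ALIGN(align, sizeof(void *));
}

int main(void)
{
	printf("%lu\n", calculate_alignment(SLAB_HWCACHE_ALIGN, 0, 24));  /* 32 */
	printf("%lu\n", calculate_alignment(SLAB_HWCACHE_ALIGN, 0, 100)); /* 64 */
	printf("%lu\n", calculate_alignment(0, 0, 100));                  /* 8  */
	return 0;
}

Because the helper has internal linkage after this patch and all of its callers live in slab_common.c, the compiler is free to inline it and drop the out-of-line copy at -O2, which is consistent with the small text/data reduction quoted above.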
Diffstat (limited to 'mm/slab_common.c')
-rw-r--r--   mm/slab_common.c   56
1 file changed, 29 insertions(+), 27 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index c8cb36774ba1..deeddf95cdcf 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -268,6 +268,35 @@ static inline void memcg_unlink_cache(struct kmem_cache *s)
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

/*
+ * Figure out what the alignment of the objects will be given a set of
+ * flags, a user specified alignment and the size of the objects.
+ */
+static unsigned long calculate_alignment(unsigned long flags,
+		unsigned long align, unsigned long size)
+{
+	/*
+	 * If the user wants hardware cache aligned objects then follow that
+	 * suggestion if the object is sufficiently large.
+	 *
+	 * The hardware cache alignment cannot override the specified
+	 * alignment though. If that is greater then use it.
+	 */
+	if (flags & SLAB_HWCACHE_ALIGN) {
+		unsigned long ralign;
+
+		ralign = cache_line_size();
+		while (size <= ralign / 2)
+			ralign /= 2;
+		align = max(align, ralign);
+	}
+
+	if (align < ARCH_SLAB_MINALIGN)
+		align = ARCH_SLAB_MINALIGN;
+
+	return ALIGN(align, sizeof(void *));
+}
+
+/*
* Find a mergeable slab cache
*/
int slab_unmergeable(struct kmem_cache *s)
@@ -337,33 +366,6 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
	return NULL;
}

-/*
- * Figure out what the alignment of the objects will be given a set of
- * flags, a user specified alignment and the size of the objects.
- */
-unsigned long calculate_alignment(slab_flags_t flags,
-		unsigned long align, unsigned long size)
-{
-	/*
-	 * If the user wants hardware cache aligned objects then follow that
-	 * suggestion if the object is sufficiently large.
-	 *
-	 * The hardware cache alignment cannot override the specified
-	 * alignment though. If that is greater then use it.
-	 */
-	if (flags & SLAB_HWCACHE_ALIGN) {
-		unsigned long ralign = cache_line_size();
-		while (size <= ralign / 2)
-			ralign /= 2;
-		align = max(align, ralign);
-	}
-
-	if (align < ARCH_SLAB_MINALIGN)
-		align = ARCH_SLAB_MINALIGN;
-
-	return ALIGN(align, sizeof(void *));
-}
-
static struct kmem_cache *create_cache(const char *name,
		size_t object_size, size_t size, size_t align,
		slab_flags_t flags, void (*ctor)(void *),