From b460bc8302f222d346f0c15bba980eb8c36d6278 Mon Sep 17 00:00:00 2001
From: Hou Tao
Date: Fri, 20 Oct 2023 21:31:57 +0800
Subject: mm/percpu.c: introduce pcpu_alloc_size()

Introduce pcpu_alloc_size() to get the size of the dynamic per-cpu
area. It will be used by the bpf memory allocator in the following
patches. The BPF memory allocator maintains per-cpu area caches for
multiple area sizes, and its free API only receives the to-be-freed
per-cpu pointer, so it needs the size of the dynamic per-cpu area to
select the corresponding cache when a bpf program frees the dynamic
per-cpu pointer.

Acked-by: Dennis Zhou
Signed-off-by: Hou Tao
Link: https://lore.kernel.org/r/20231020133202.4043247-3-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov
---
 mm/percpu.c | 31 +++++++++++++++++++++++++++++++
 1 file changed, 31 insertions(+)

diff --git a/mm/percpu.c b/mm/percpu.c
index ea607078368d..60ed078e4cd0 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -2244,6 +2244,37 @@ static void pcpu_balance_workfn(struct work_struct *work)
 	mutex_unlock(&pcpu_alloc_mutex);
 }
 
+/**
+ * pcpu_alloc_size - the size of the dynamic percpu area
+ * @ptr: pointer to the dynamic percpu area
+ *
+ * Returns the size of the @ptr allocation. This is undefined for statically
+ * defined percpu variables as there is no corresponding chunk->bound_map.
+ *
+ * RETURNS:
+ * The size of the dynamic percpu area.
+ *
+ * CONTEXT:
+ * Can be called from atomic context.
+ */
+size_t pcpu_alloc_size(void __percpu *ptr)
+{
+	struct pcpu_chunk *chunk;
+	unsigned long bit_off, end;
+	void *addr;
+
+	if (!ptr)
+		return 0;
+
+	addr = __pcpu_ptr_to_addr(ptr);
+	/* No pcpu_lock here: ptr has not been freed, so chunk is still alive */
+	chunk = pcpu_chunk_addr_search(addr);
+	bit_off = (addr - chunk->base_addr) / PCPU_MIN_ALLOC_SIZE;
+	end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
+			    bit_off + 1);
+	return (end - bit_off) * PCPU_MIN_ALLOC_SIZE;
+}
+
 /**
  * free_percpu - free percpu area
  * @ptr: pointer to area to free
--
cgit
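
A quick userspace model (not part of the patch) of the bound_map lookup
that pcpu_alloc_size() performs: each bit in bound_map covers
PCPU_MIN_ALLOC_SIZE bytes of a chunk, and a set bit marks an allocation
boundary (the start of every allocation plus one past its end), so the
distance from an allocation's start bit to the next set bit gives its
size. Note that the search starts at bit_off + 1: the start bit itself
is set, so searching from bit_off would return it and yield size zero.
The bitmap helpers, the toy chunk layout, and the constants below are
simplified stand-ins, not the kernel's implementation.

#include <stdio.h>
#include <stddef.h>

#define PCPU_MIN_ALLOC_SIZE 4	/* assumed to mirror the kernel's minimum unit */
#define MAP_BITS 32		/* toy chunk: 32 minimum-size units */

static unsigned long bound_map;	/* one word is enough for the toy chunk */

/* first set bit at or after @start; stand-in for the kernel's find_next_bit() */
static unsigned int next_bound(unsigned int start)
{
	unsigned int i;

	for (i = start; i < MAP_BITS; i++)
		if (bound_map & (1UL << i))
			return i;
	return MAP_BITS;
}

/* the core of pcpu_alloc_size(): distance to the next boundary bit */
static size_t alloc_size(unsigned int bit_off)
{
	return (size_t)(next_bound(bit_off + 1) - bit_off) * PCPU_MIN_ALLOC_SIZE;
}

int main(void)
{
	/*
	 * Model three back-to-back allocations of 8, 16 and 4 bytes.
	 * Allocating sets a bit at the start and one past the end, so
	 * adjacent allocations share a boundary bit.
	 */
	bound_map |= 1UL << 0;	/* start of the 8-byte area */
	bound_map |= 1UL << 2;	/* its end / start of the 16-byte area */
	bound_map |= 1UL << 6;	/* start of the 4-byte area */
	bound_map |= 1UL << 7;	/* boundary past the last allocation */

	printf("size at unit 0: %zu\n", alloc_size(0));	/* prints 8 */
	printf("size at unit 2: %zu\n", alloc_size(2));	/* prints 16 */
	printf("size at unit 6: %zu\n", alloc_size(6));	/* prints 4 */
	return 0;
}

This size recovery is exactly what lets a caller such as the bpf memory
allocator, which is handed only the per-cpu pointer at free time, pick
the per-size cache the allocation originally came from.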