From dbe43d4d2837da3d11fd7f4e2ed1a395012fe6f5 Mon Sep 17 00:00:00 2001
From: Jaewon Kim
Date: Fri, 24 Feb 2017 14:58:50 -0800
Subject: mm: cma: print allocation failure reason and bitmap status

There are many reasons for CMA allocation failure, such as EBUSY, ENOMEM,
and EINTR, but so far the error value has not been reported. This patch
prints the error value.

Additionally, if CONFIG_CMA_DEBUG is enabled, this patch shows the bitmap
status so the available pages can be seen. CMA internally tries all
available regions, because some regions can fail with EBUSY. The bitmap
status is useful for diagnosing both ENOMEM and EBUSY in detail:

ENOMEM: nothing was tried because no region was available;
        the total region may be too small, or it may be a fragmentation issue
EBUSY:  some regions were tried but all of them failed

This is an ENOMEM example with this patch applied:

[2: Binder:714_1: 744] cma: cma_alloc: alloc failed, req-size: 256 pages, ret: -12

If CONFIG_CMA_DEBUG is enabled, the available pages are also shown in a
concatenated size@position format. For example, 4@572 means that there are
4 available pages at position 572, counting from position 0.

[2: Binder:714_1: 744] cma: number of available pages: 4@572+7@585+7@601+8@632+38@730+166@1114+127@1921=> 357 free of 2048 total pages

Link: http://lkml.kernel.org/r/1485909785-3952-1-git-send-email-jaewon31.kim@samsung.com
Signed-off-by: Jaewon Kim
Acked-by: Michal Nazarewicz
Cc: Laura Abbott
Cc: Joonsoo Kim
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/cma.c | 34 +++++++++++++++++++++++++++++++++-
 1 file changed, 33 insertions(+), 1 deletion(-)

(limited to 'mm/cma.c')

diff --git a/mm/cma.c b/mm/cma.c
index 2906ae5a83ff..a6033e344430 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -348,6 +348,32 @@ err:
         return ret;
 }
 
+#ifdef CONFIG_CMA_DEBUG
+static void cma_debug_show_areas(struct cma *cma)
+{
+        unsigned long next_zero_bit, next_set_bit;
+        unsigned long start = 0;
+        unsigned int nr_zero, nr_total = 0;
+
+        mutex_lock(&cma->lock);
+        pr_info("number of available pages: ");
+        for (;;) {
+                next_zero_bit = find_next_zero_bit(cma->bitmap, cma->count, start);
+                if (next_zero_bit >= cma->count)
+                        break;
+                next_set_bit = find_next_bit(cma->bitmap, cma->count, next_zero_bit);
+                nr_zero = next_set_bit - next_zero_bit;
+                pr_cont("%s%u@%lu", nr_total ? "+" : "", nr_zero, next_zero_bit);
+                nr_total += nr_zero;
+                start = next_zero_bit + nr_zero;
+        }
+        pr_cont("=> %u free of %lu total pages\n", nr_total, cma->count);
+        mutex_unlock(&cma->lock);
+}
+#else
+static inline void cma_debug_show_areas(struct cma *cma) { }
+#endif
+
 /**
  * cma_alloc() - allocate pages from contiguous area
  * @cma:   Contiguous memory region for which the allocation is performed.
@@ -365,7 +391,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
         unsigned long start = 0;
         unsigned long bitmap_maxno, bitmap_no, bitmap_count;
         struct page *page = NULL;
-        int ret;
+        int ret = -ENOMEM;
 
         if (!cma || !cma->count)
                 return NULL;
@@ -423,6 +449,12 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 
         trace_cma_alloc(pfn, page, count, align);
 
+        if (ret) {
+                pr_info("%s: alloc failed, req-size: %zu pages, ret: %d\n",
+                        __func__, count, ret);
+                cma_debug_show_areas(cma);
+        }
+
         pr_debug("%s(): returned %p\n", __func__, page);
         return page;
 }
--
cgit
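
As an aside, the bitmap walk in cma_debug_show_areas() above can be illustrated
outside the kernel. The sketch below is a minimal userspace approximation, not
part of the patch: next_zero() and next_set() are simplified stand-ins for the
kernel's find_next_zero_bit() and find_next_bit(), and the toy bitmap contents
are made up purely for demonstration.

#include <stdio.h>

#define NBITS 64

/* Toy stand-ins for the kernel's find_next_zero_bit()/find_next_bit():
 * return the index of the next clear/set entry at or after 'start'. */
static unsigned long next_zero(const char *bits, unsigned long size,
                               unsigned long start)
{
        while (start < size && bits[start])
                start++;
        return start;
}

static unsigned long next_set(const char *bits, unsigned long size,
                              unsigned long start)
{
        while (start < size && !bits[start])
                start++;
        return start;
}

int main(void)
{
        char bits[NBITS] = { 0 };
        unsigned long start = 0, next_zero_bit, next_set_bit, i;
        unsigned int nr_zero, nr_total = 0;

        /* Mark a couple of ranges as allocated so some free runs remain. */
        for (i = 0; i < 10; i++)
                bits[i] = 1;
        for (i = 20; i < 40; i++)
                bits[i] = 1;

        /* Same walk as cma_debug_show_areas(): print each free run as size@position. */
        printf("number of available pages: ");
        for (;;) {
                next_zero_bit = next_zero(bits, NBITS, start);
                if (next_zero_bit >= NBITS)
                        break;
                next_set_bit = next_set(bits, NBITS, next_zero_bit);
                nr_zero = next_set_bit - next_zero_bit;
                printf("%s%u@%lu", nr_total ? "+" : "", nr_zero, next_zero_bit);
                nr_total += nr_zero;
                start = next_zero_bit + nr_zero;
        }
        printf("=> %u free of %d total pages\n", nr_total, NBITS);
        return 0;
}

With the toy bitmap above this prints
"number of available pages: 10@10+24@40=> 34 free of 64 total pages",
which has the same concatenated size@position format as the log line shown in
the commit message.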