author      Linus Torvalds <torvalds@linux-foundation.org>   2019-01-05 09:16:18 -0800
committer   Linus Torvalds <torvalds@linux-foundation.org>   2019-01-05 09:16:18 -0800
commit      a65981109f294ba7e64b33ad3b4575a4636fce66 (patch)
tree        1061a49f11544e18775630938a8bc53920fa0421 /lib
parent      3fed6ae4b027f9c93be18520f87bd06bdffd196b (diff)
parent      b685a7350ae76bc0f388e24b36d06a63776c68ee (diff)
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:

 - procfs updates
 - various misc bits
 - lib/ updates
 - epoll updates
 - autofs
 - fatfs
 - a few more MM bits

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (58 commits)
  mm/page_io.c: fix polled swap page in
  checkpatch: add Co-developed-by to signature tags
  docs: fix Co-Developed-by docs
  drivers/base/platform.c: kmemleak ignore a known leak
  fs: don't open code lru_to_page()
  fs/: remove caller signal_pending branch predictions
  mm/: remove caller signal_pending branch predictions
  arch/arc/mm/fault.c: remove caller signal_pending_branch predictions
  kernel/sched/: remove caller signal_pending branch predictions
  kernel/locking/mutex.c: remove caller signal_pending branch predictions
  mm: select HAVE_MOVE_PMD on x86 for faster mremap
  mm: speed up mremap by 20x on large regions
  mm: treewide: remove unused address argument from pte_alloc functions
  initramfs: cleanup incomplete rootfs
  scripts/gdb: fix lx-version string output
  kernel/kcov.c: mark write_comp_data() as notrace
  kernel/sysctl: add panic_print into sysctl
  panic: add options to print system info when panic happens
  bfs: extra sanity checking and static inode bitmap
  exec: separate MM_ANONPAGES and RLIMIT_STACK accounting
  ...
Diffstat (limited to 'lib')
-rw-r--r--  lib/find_bit_benchmark.c  11
-rw-r--r--  lib/genalloc.c            24
2 files changed, 19 insertions, 16 deletions
diff --git a/lib/find_bit_benchmark.c b/lib/find_bit_benchmark.c
index 5367ffa5c18f..f0e394dd2beb 100644
--- a/lib/find_bit_benchmark.c
+++ b/lib/find_bit_benchmark.c
@@ -108,14 +108,13 @@ static int __init test_find_next_and_bit(const void *bitmap,
                 const void *bitmap2, unsigned long len)
 {
         unsigned long i, cnt;
-        cycles_t cycles;
+        ktime_t time;
 
-        cycles = get_cycles();
+        time = ktime_get();
         for (cnt = i = 0; i < BITMAP_LEN; cnt++)
-                i = find_next_and_bit(bitmap, bitmap2, BITMAP_LEN, i+1);
-        cycles = get_cycles() - cycles;
-        pr_err("find_next_and_bit:\t\t%llu cycles, %ld iterations\n",
-                        (u64)cycles, cnt);
+                i = find_next_and_bit(bitmap, bitmap2, BITMAP_LEN, i + 1);
+        time = ktime_get() - time;
+        pr_err("find_next_and_bit: %18llu ns, %6ld iterations\n", time, cnt);
 
         return 0;
 }
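For reference, the timing pattern that the hunk above converts to can be reproduced as a tiny stand-alone test module. The sketch below is illustrative only and not part of the patch: the module name, the bitmap size, and the use of find_next_bit() instead of find_next_and_bit() are arbitrary choices made here for brevity.

/*
 * Hypothetical mini-benchmark, not from the patch: it times a bitmap scan
 * with ktime_get(), mirroring the conversion away from get_cycles().
 */
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/slab.h>

#define DEMO_BITMAP_LEN (1UL << 20)     /* arbitrary: 1M bits, 128 KiB bitmap */

static int __init ktime_bench_demo_init(void)
{
        unsigned long *bitmap;
        unsigned long i, cnt;
        ktime_t time;

        bitmap = bitmap_zalloc(DEMO_BITMAP_LEN, GFP_KERNEL);
        if (!bitmap)
                return -ENOMEM;

        /* Set every other word so the scan has some work to do. */
        for (i = 0; i < DEMO_BITMAP_LEN; i += 2 * BITS_PER_LONG)
                bitmap_set(bitmap, i, BITS_PER_LONG);

        time = ktime_get();
        for (cnt = i = 0; i < DEMO_BITMAP_LEN; cnt++)
                i = find_next_bit(bitmap, DEMO_BITMAP_LEN, i + 1);
        time = ktime_get() - time;

        pr_err("find_next_bit demo: %18llu ns, %6ld iterations\n",
               (unsigned long long)time, cnt);

        bitmap_free(bitmap);
        return 0;
}

static void __exit ktime_bench_demo_exit(void)
{
}

module_init(ktime_bench_demo_init);
module_exit(ktime_bench_demo_exit);
MODULE_LICENSE("GPL");

The point of the conversion is that ktime_get() reports nanoseconds everywhere, while get_cycles() is architecture-specific and may not be implemented at all, so the benchmark output becomes comparable across platforms.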
diff --git a/lib/genalloc.c b/lib/genalloc.c
index ca06adc4f445..f365d71cdc77 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -187,7 +187,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
         int nbytes = sizeof(struct gen_pool_chunk) +
                                 BITS_TO_LONGS(nbits) * sizeof(long);
 
-        chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);
+        chunk = vzalloc_node(nbytes, nid);
         if (unlikely(chunk == NULL))
                 return -ENOMEM;
 
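The kzalloc_node() to vzalloc_node() switch in the hunk above matters because the per-chunk bitmap scales linearly with chunk size, so for large on-chip memories the request becomes a large physically contiguous allocation that kzalloc may fail to satisfy. The numbers below are purely hypothetical (a 1 GiB chunk managed at 32-byte granularity, i.e. min_alloc_order = 5, on an LP64 machine); the snippet is ordinary userspace C written only to show the size math, and it ignores the small struct gen_pool_chunk header.

/* Illustrative size math only; all values here are hypothetical. */
#include <stdio.h>

int main(void)
{
        unsigned long chunk_size = 1UL << 30;   /* hypothetical 1 GiB chunk */
        int min_alloc_order = 5;                /* hypothetical 32-byte allocation unit */

        unsigned long nbits = chunk_size >> min_alloc_order;           /* 33554432 bits */
        unsigned long nbytes = (nbits + 63) / 64 * sizeof(long);       /* BITS_TO_LONGS() spelled out for LP64 */

        /* Prints: bitmap alone needs 4194304 bytes (4 MiB). */
        printf("bitmap alone needs %lu bytes (%lu MiB)\n", nbytes, nbytes >> 20);
        return 0;
}

A bitmap of that size is an awkward contiguous kernel allocation, while vzalloc_node() builds it from individually allocated pages.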
@@ -251,7 +251,7 @@ void gen_pool_destroy(struct gen_pool *pool)
                 bit = find_next_bit(chunk->bits, end_bit, 0);
                 BUG_ON(bit < end_bit);
 
-                kfree(chunk);
+                vfree(chunk);
         }
         kfree_const(pool->name);
         kfree(pool);
@@ -311,7 +311,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
                 end_bit = chunk_size(chunk) >> order;
 retry:
                 start_bit = algo(chunk->bits, end_bit, start_bit,
-                                nbits, data, pool);
+                                nbits, data, pool, chunk->start_addr);
                 if (start_bit >= end_bit)
                         continue;
                 remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
@@ -525,7 +525,7 @@ EXPORT_SYMBOL(gen_pool_set_algo);
  */
 unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
                 unsigned long start, unsigned int nr, void *data,
-                struct gen_pool *pool)
+                struct gen_pool *pool, unsigned long start_addr)
 {
         return bitmap_find_next_zero_area(map, size, start, nr, 0);
 }
@@ -543,16 +543,19 @@ EXPORT_SYMBOL(gen_pool_first_fit);
  */
 unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
                 unsigned long start, unsigned int nr, void *data,
-                struct gen_pool *pool)
+                struct gen_pool *pool, unsigned long start_addr)
 {
         struct genpool_data_align *alignment;
-        unsigned long align_mask;
+        unsigned long align_mask, align_off;
         int order;
 
         alignment = data;
         order = pool->min_alloc_order;
         align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
-        return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
+        align_off = (start_addr & (alignment->align - 1)) >> order;
+
+        return bitmap_find_next_zero_area_off(map, size, start, nr,
+                                              align_mask, align_off);
 }
 
 EXPORT_SYMBOL(gen_pool_first_fit_align);
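To see what the new align_off term buys, here is a worked example with hypothetical values: a chunk whose start_addr is 0x1010 (so not 256-byte aligned), a 256-byte alignment request, and min_alloc_order = 4 (16-byte allocation units). It is plain userspace C that mirrors the arithmetic in the hunk above and the rounding performed by bitmap_find_next_zero_area_off(); it is not code taken from the kernel.

/* Worked example with hypothetical numbers, not code from the patch. */
#include <stdio.h>

int main(void)
{
        unsigned long start_addr = 0x1010;      /* hypothetical chunk base, not 256-byte aligned */
        unsigned long align = 256;              /* requested alignment (genpool_data_align.align) */
        int order = 4;                          /* hypothetical min_alloc_order: 16-byte units */

        /* Same arithmetic as gen_pool_first_fit_align() after this patch. */
        unsigned long align_mask = ((align + (1UL << order) - 1) >> order) - 1; /* 15 */
        unsigned long align_off = (start_addr & (align - 1)) >> order;          /* 1  */

        /*
         * bitmap_find_next_zero_area_off() rounds the first free bit index up
         * so that (index + align_off) is a multiple of (align_mask + 1).  On
         * an empty bitmap that gives index 15 here, not index 0 as before.
         */
        unsigned long index = ((0 + align_off + align_mask) & ~align_mask) - align_off;

        printf("old behaviour: 0x%lx (not 256-byte aligned)\n", start_addr + (0UL << order));
        printf("new behaviour: 0x%lx (256-byte aligned)\n", start_addr + (index << order));
        return 0;
}

Because the alignment is now taken relative to the chunk's start_addr rather than to bit 0 of the bitmap, chunks whose base address is not itself aligned to the request still hand out correctly aligned addresses.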
@@ -567,7 +570,7 @@ EXPORT_SYMBOL(gen_pool_first_fit_align);
  */
 unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
                 unsigned long start, unsigned int nr, void *data,
-                struct gen_pool *pool)
+                struct gen_pool *pool, unsigned long start_addr)
 {
         struct genpool_data_fixed *fixed_data;
         int order;
@@ -601,7 +604,8 @@ EXPORT_SYMBOL(gen_pool_fixed_alloc);
  */
 unsigned long gen_pool_first_fit_order_align(unsigned long *map,
                 unsigned long size, unsigned long start,
-                unsigned int nr, void *data, struct gen_pool *pool)
+                unsigned int nr, void *data, struct gen_pool *pool,
+                unsigned long start_addr)
 {
         unsigned long align_mask = roundup_pow_of_two(nr) - 1;
 
@@ -624,7 +628,7 @@ EXPORT_SYMBOL(gen_pool_first_fit_order_align);
  */
 unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
                 unsigned long start, unsigned int nr, void *data,
-                struct gen_pool *pool)
+                struct gen_pool *pool, unsigned long start_addr)
 {
         unsigned long start_bit = size;
         unsigned long len = size + 1;
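Taken together, the genalloc.c hunks widen the allocation-callback contract: every algorithm now receives the chunk's start_addr as an extra argument, and any out-of-tree algorithm installed with gen_pool_set_algo() has to adopt the new prototype as well. The sketch below shows what such a callback looks like after this change; it is a fragment meant to live in a driver that already creates a gen_pool, and the names demo_first_fit and demo_pool_setup are made up for illustration.

/*
 * Hypothetical custom allocator callback matching the widened prototype;
 * it ignores start_addr and simply behaves like gen_pool_first_fit().
 */
#include <linux/bitmap.h>
#include <linux/genalloc.h>

static unsigned long demo_first_fit(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data,
                struct gen_pool *pool, unsigned long start_addr)
{
        return bitmap_find_next_zero_area(map, size, start, nr, 0);
}

static void demo_pool_setup(struct gen_pool *pool)
{
        /* Install the callback; the last argument is per-algorithm private data. */
        gen_pool_set_algo(pool, demo_first_fit, NULL);
}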