author     Catalin Marinas <catalin.marinas@arm.com>  2021-08-31 09:10:00 +0100
committer  Catalin Marinas <catalin.marinas@arm.com>  2021-08-31 09:10:00 +0100
commit     65266a7c6abfa1ad915a362c41bf38576607f1f9 (patch)
tree       046d86fc88c50fcdeaddb9ab4c709ae0878b1e3f /mm
parent     1a7f67e618d42e9870dcd9fb0c7b2682d71fd631 (diff)
parent     702f43872665e3b1cc6fdb77d238533274fc9d18 (diff)
Merge remote-tracking branch 'tip/sched/arm64' into for-next/core
* tip/sched/arm64: (785 commits)
  Documentation: arm64: describe asymmetric 32-bit support
  arm64: Remove logic to kill 32-bit tasks on 64-bit-only cores
  arm64: Hook up cmdline parameter to allow mismatched 32-bit EL0
  arm64: Advertise CPUs capable of running 32-bit applications in sysfs
  arm64: Prevent offlining first CPU with 32-bit EL0 on mismatched system
  arm64: exec: Adjust affinity for compat tasks with mismatched 32-bit EL0
  arm64: Implement task_cpu_possible_mask()
  sched: Introduce dl_task_check_affinity() to check proposed affinity
  sched: Allow task CPU affinity to be restricted on asymmetric systems
  sched: Split the guts of sched_setaffinity() into a helper function
  sched: Introduce task_struct::user_cpus_ptr to track requested affinity
  sched: Reject CPU affinity changes based on task_cpu_possible_mask()
  cpuset: Cleanup cpuset_cpus_allowed_fallback() use in select_fallback_rq()
  cpuset: Honour task_cpu_possible_mask() in guarantee_online_cpus()
  cpuset: Don't use the cpu_possible_mask as a last resort for cgroup v1
  sched: Introduce task_cpu_possible_mask() to limit fallback rq selection
  sched: Cgroup SCHED_IDLE support
  sched/topology: Skip updating masks for non-online nodes
  Linux 5.14-rc6
  lib: use PFN_PHYS() in devmem_is_allowed()
  ...
Diffstat (limited to 'mm')
-rw-r--r--  mm/gup.c         7
-rw-r--r--  mm/kmemleak.c    6
-rw-r--r--  mm/madvise.c     4
-rw-r--r--  mm/memcontrol.c  9
-rw-r--r--  mm/migrate.c     2
-rw-r--r--  mm/slab.h        2
-rw-r--r--  mm/slub.c       39
7 files changed, 41 insertions(+), 28 deletions(-)
diff --git a/mm/gup.c b/mm/gup.c
index 42b8b1fa6521..b94717977d17 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1558,9 +1558,12 @@ long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
gup_flags |= FOLL_WRITE;
/*
- * See check_vma_flags(): Will return -EFAULT on incompatible mappings
- * or with insufficient permissions.
+ * We want to report -EINVAL instead of -EFAULT for any permission
+ * problems or incompatible mappings.
*/
+ if (check_vma_flags(vma, gup_flags))
+ return -EINVAL;
+
return __get_user_pages(mm, start, nr_pages, gup_flags,
NULL, NULL, locked);
}
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 228a2fbe0657..73d46d16d575 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -290,7 +290,7 @@ static void hex_dump_object(struct seq_file *seq,
warn_or_seq_printf(seq, " hex dump (first %zu bytes):\n", len);
kasan_disable_current();
warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
- HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
+ HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
kasan_enable_current();
}
@@ -1171,7 +1171,7 @@ static bool update_checksum(struct kmemleak_object *object)
kasan_disable_current();
kcsan_disable_current();
- object->checksum = crc32(0, (void *)object->pointer, object->size);
+ object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
kasan_enable_current();
kcsan_enable_current();
@@ -1246,7 +1246,7 @@ static void scan_block(void *_start, void *_end,
break;
kasan_disable_current();
- pointer = *ptr;
+ pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
kasan_enable_current();
untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
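These three kmemleak hunks strip the KASAN tag from a pointer before kmemleak reads the memory behind it (for the hex dump, the CRC32 checksum and the word read during block scanning). With tag-based KASAN the tag sits in the top byte of the pointer, and kmemleak's raw reads may legitimately go through a pointer whose tag no longer matches the memory, so the tag is reset to the match-all value first. A minimal sketch of the idea; the shift and mask below are assumptions for illustration, not the kernel's kasan_reset_tag() implementation:

#include <stdint.h>

/*
 * Illustration only: with arm64 tag-based KASAN the tag occupies the top
 * byte of a 64-bit pointer, and 0xff is the match-all / native-kernel tag.
 * The real helper is kasan_reset_tag().
 */
#define PTR_TAG_SHIFT 56
#define PTR_TAG_MASK  (0xffULL << PTR_TAG_SHIFT)

static inline void *reset_tag_sketch(const void *ptr)
{
        uintptr_t raw = (uintptr_t)ptr;

        /* force the tag to 0xff so raw reads of the memory cannot tag-fault */
        return (void *)((raw & ~PTR_TAG_MASK) | ((uintptr_t)0xff << PTR_TAG_SHIFT));
}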
diff --git a/mm/madvise.c b/mm/madvise.c
index 6d3d348b17f4..5c065bc8b5f6 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -862,10 +862,12 @@ static long madvise_populate(struct vm_area_struct *vma,
switch (pages) {
case -EINTR:
return -EINTR;
- case -EFAULT: /* Incompatible mappings / permissions. */
+ case -EINVAL: /* Incompatible mappings / permissions. */
return -EINVAL;
case -EHWPOISON:
return -EHWPOISON;
+ case -EFAULT: /* VM_FAULT_SIGBUS or VM_FAULT_SIGSEGV */
+ return -EFAULT;
default:
pr_warn_once("%s: unhandled return value: %ld\n",
__func__, pages);
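Together with the mm/gup.c hunk above, madvise_populate() now reports -EINVAL for mappings that can never be populated the requested way (incompatible VMA flags or permissions) and passes a genuine population failure through as -EFAULT (the VM_FAULT_SIGBUS / VM_FAULT_SIGSEGV case noted in the new comment). A hedged userspace sketch of how a caller of MADV_POPULATE_READ might tell the two apart; MADV_POPULATE_READ is the Linux 5.14 UAPI value, and the fallback define is only for older headers:

#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>

#ifndef MADV_POPULATE_READ
#define MADV_POPULATE_READ 22   /* from <linux/mman.h>, Linux 5.14+ */
#endif

/* Try to prefault a region and report why it failed, if it did. */
static int prefault(void *addr, size_t len)
{
        if (madvise(addr, len, MADV_POPULATE_READ) == 0)
                return 0;

        switch (errno) {
        case EINVAL:    /* incompatible mapping or permissions */
                fprintf(stderr, "region cannot be populated this way\n");
                break;
        case EFAULT:    /* population hit a hard fault (SIGBUS/SIGSEGV semantics) */
                fprintf(stderr, "population faulted\n");
                break;
        default:
                perror("madvise(MADV_POPULATE_READ)");
        }
        return -1;
}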
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ae1f5d0cb581..702a81dfe72d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3106,13 +3106,15 @@ void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
stock->cached_pgdat = pgdat;
} else if (stock->cached_pgdat != pgdat) {
/* Flush the existing cached vmstat data */
+ struct pglist_data *oldpg = stock->cached_pgdat;
+
if (stock->nr_slab_reclaimable_b) {
- mod_objcg_mlstate(objcg, pgdat, NR_SLAB_RECLAIMABLE_B,
+ mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
stock->nr_slab_reclaimable_b);
stock->nr_slab_reclaimable_b = 0;
}
if (stock->nr_slab_unreclaimable_b) {
- mod_objcg_mlstate(objcg, pgdat, NR_SLAB_UNRECLAIMABLE_B,
+ mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
stock->nr_slab_unreclaimable_b);
stock->nr_slab_unreclaimable_b = 0;
}
@@ -3574,7 +3576,8 @@ static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
unsigned long val;
if (mem_cgroup_is_root(memcg)) {
- cgroup_rstat_flush(memcg->css.cgroup);
+ /* mem_cgroup_threshold() calls here from irqsafe context */
+ cgroup_rstat_flush_irqsafe(memcg->css.cgroup);
val = memcg_page_state(memcg, NR_FILE_PAGES) +
memcg_page_state(memcg, NR_ANON_MAPPED);
if (swap)
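The first memcontrol.c hunk fixes which node the cached per-CPU slab vmstat deltas are flushed to: they were accumulated against stock->cached_pgdat, so they must be credited to that old node before the cache is repointed at the new pgdat. The second hunk switches to the irq-safe rstat flush because, per the added comment, mem_cgroup_threshold() reaches mem_cgroup_usage() from irqsafe context. A small sketch of the first pattern with hypothetical names (struct node_stats and node_stat_add() are illustrative, not kernel APIs):

struct node_stats;                                        /* hypothetical per-node counters */
void node_stat_add(struct node_stats *node, long delta);  /* hypothetical flush helper */

struct stat_stock {
        struct node_stats *cached_node;
        long pending;
};

static void stock_account(struct stat_stock *stock, struct node_stats *node,
                          long delta)
{
        if (stock->cached_node != node) {
                /* credit the pending deltas to the node they belong to */
                if (stock->cached_node && stock->pending) {
                        node_stat_add(stock->cached_node, stock->pending);
                        stock->pending = 0;
                }
                stock->cached_node = node;
        }
        stock->pending += delta;
}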
diff --git a/mm/migrate.c b/mm/migrate.c
index 34a9ad3e0a4f..7e240437e7d9 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2068,7 +2068,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
LIST_HEAD(migratepages);
new_page_t *new;
bool compound;
- unsigned int nr_pages = thp_nr_pages(page);
+ int nr_pages = thp_nr_pages(page);
/*
* PTE mapped THP or HugeTLB page can't reach here so the page could
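The migrate.c hunk makes nr_pages a signed int. The classic pitfall with an unsigned page count is that negating it for a statistics update, or mixing it into signed arithmetic, silently yields a huge positive value instead of a negative delta; a standalone illustration of that C behaviour (this shows the general pitfall, not necessarily the exact expression the patch was written for):

#include <stdio.h>

int main(void)
{
        unsigned int u_nr = 512;    /* what thp_nr_pages() was stored into before */
        int s_nr = 512;             /* what the hunk stores it into now */

        /*
         * Negating an unsigned int happens in unsigned arithmetic, so the
         * "negative delta" becomes a huge positive number before it is ever
         * widened to long (values shown assume LP64).
         */
        long bad  = -u_nr;          /* 4294966784, not -512 */
        long good = -s_nr;          /* -512 */

        printf("bad=%ld good=%ld\n", bad, good);
        return 0;
}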
diff --git a/mm/slab.h b/mm/slab.h
index f997fd5e42c8..58c01a34e5b8 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -346,7 +346,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
continue;
page = virt_to_head_page(p[i]);
- objcgs = page_objcgs(page);
+ objcgs = page_objcgs_check(page);
if (!objcgs)
continue;
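The slab.h hunk switches memcg_slab_free_hook() from page_objcgs() to page_objcgs_check(). The difference is the usual asserting-getter versus checking-getter pattern: the plain getter assumes the page's memcg_data word already holds an object-cgroup vector, while the _check variant verifies the type bit and returns NULL otherwise, which suits a free path that cannot make that assumption. A simplified sketch of the pattern; the flag values below are assumptions for illustration, not the kernel's MEMCG_DATA_* layout:

#define DATA_HAS_OBJCGS  0x1UL
#define DATA_FLAGS_MASK  0x3UL

struct obj_cgroup;

/* Caller already knows the tagged word holds an objcg vector. */
static inline struct obj_cgroup **objcgs_sketch(unsigned long data)
{
        return (struct obj_cgroup **)(data & ~DATA_FLAGS_MASK);
}

/* Caller cannot assume anything: verify the type bit, else return NULL. */
static inline struct obj_cgroup **objcgs_check_sketch(unsigned long data)
{
        if (!data || !(data & DATA_HAS_OBJCGS))
                return NULL;
        return (struct obj_cgroup **)(data & ~DATA_FLAGS_MASK);
}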
diff --git a/mm/slub.c b/mm/slub.c
index 090fa14628f9..f77d8cd79ef7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -576,8 +576,8 @@ static void print_section(char *level, char *text, u8 *addr,
unsigned int length)
{
metadata_access_enable();
- print_hex_dump(level, kasan_reset_tag(text), DUMP_PREFIX_ADDRESS,
- 16, 1, addr, length, 1);
+ print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
+ 16, 1, kasan_reset_tag((void *)addr), length, 1);
metadata_access_disable();
}
@@ -1400,12 +1400,13 @@ check_slabs:
static int __init setup_slub_debug(char *str)
{
slab_flags_t flags;
+ slab_flags_t global_flags;
char *saved_str;
char *slab_list;
bool global_slub_debug_changed = false;
bool slab_list_specified = false;
- slub_debug = DEBUG_DEFAULT_FLAGS;
+ global_flags = DEBUG_DEFAULT_FLAGS;
if (*str++ != '=' || !*str)
/*
* No options specified. Switch on full debugging.
@@ -1417,7 +1418,7 @@ static int __init setup_slub_debug(char *str)
str = parse_slub_debug_flags(str, &flags, &slab_list, true);
if (!slab_list) {
- slub_debug = flags;
+ global_flags = flags;
global_slub_debug_changed = true;
} else {
slab_list_specified = true;
@@ -1426,16 +1427,18 @@ static int __init setup_slub_debug(char *str)
/*
* For backwards compatibility, a single list of flags with list of
- * slabs means debugging is only enabled for those slabs, so the global
- * slub_debug should be 0. We can extended that to multiple lists as
+ * slabs means debugging is only changed for those slabs, so the global
+ * slub_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
+ * on CONFIG_SLUB_DEBUG_ON). We can extended that to multiple lists as
* long as there is no option specifying flags without a slab list.
*/
if (slab_list_specified) {
if (!global_slub_debug_changed)
- slub_debug = 0;
+ global_flags = slub_debug;
slub_debug_string = saved_str;
}
out:
+ slub_debug = global_flags;
if (slub_debug != 0 || slub_debug_string)
static_branch_enable(&slub_debug_enabled);
else
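Net effect of the setup_slub_debug() hunks, per the updated comment above: parsed global flags are collected in the local global_flags and only written to slub_debug once at the end, so a boot parameter that only targets specific slabs leaves the build-time default alone. For example, with the documented slub_debug=<flags>,<slab list> syntax, booting a CONFIG_SLUB_DEBUG_ON=y kernel with something like slub_debug=F,dentry should now keep DEBUG_DEFAULT_FLAGS for every other cache instead of silently resetting the global flags to 0 (the exact parameter spelling here is an illustrative assumption, not taken from the commit).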
@@ -3236,6 +3239,16 @@ struct detached_freelist {
struct kmem_cache *s;
};
+static inline void free_nonslab_page(struct page *page, void *object)
+{
+ unsigned int order = compound_order(page);
+
+ VM_BUG_ON_PAGE(!PageCompound(page), page);
+ kfree_hook(object);
+ mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B, -(PAGE_SIZE << order));
+ __free_pages(page, order);
+}
+
/*
* This function progressively scans the array with free objects (with
* a limited look ahead) and extract objects belonging to the same
@@ -3272,9 +3285,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
if (!s) {
/* Handle kalloc'ed objects */
if (unlikely(!PageSlab(page))) {
- BUG_ON(!PageCompound(page));
- kfree_hook(object);
- __free_pages(page, compound_order(page));
+ free_nonslab_page(page, object);
p[size] = NULL; /* mark object processed */
return size;
}
@@ -4250,13 +4261,7 @@ void kfree(const void *x)
page = virt_to_head_page(x);
if (unlikely(!PageSlab(page))) {
- unsigned int order = compound_order(page);
-
- BUG_ON(!PageCompound(page));
- kfree_hook(object);
- mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
- -(PAGE_SIZE << order));
- __free_pages(page, order);
+ free_nonslab_page(page, object);
return;
}
slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
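The remaining slub.c hunks factor the duplicated "not a slab page" handling out of kfree() and build_detached_freelist() into free_nonslab_page(); as a side effect the bulk-free path now also adjusts NR_SLAB_UNRECLAIMABLE_B, which the open-coded copy it replaces did not. That path is taken for kmalloc() allocations too large for any kmalloc cache, which SLUB serves directly from the page allocator as compound pages. A toy kernel-module-style sketch of an allocation that would exercise it; the 64 KiB size is just an assumption comfortably above KMALLOC_MAX_CACHE_SIZE on common configs:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

static int __init nonslab_demo_init(void)
{
        /* Larger than any kmalloc cache: backed by a compound page. */
        void *buf = kmalloc(64 * 1024, GFP_KERNEL);

        if (!buf)
                return -ENOMEM;

        /* !PageSlab() for this page, so kfree() goes via free_nonslab_page(). */
        kfree(buf);
        return 0;
}

static void __exit nonslab_demo_exit(void)
{
}

module_init(nonslab_demo_init);
module_exit(nonslab_demo_exit);
MODULE_LICENSE("GPL");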