-rw-r--r--  mm/slub.c | 53
1 file changed, 46 insertions(+), 7 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 6bcddc513963..114ec18d223b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4882,18 +4882,43 @@ __pcs_replace_empty_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs,
}
static __fastpath_inline
-void *alloc_from_pcs(struct kmem_cache *s, gfp_t gfp)
+void *alloc_from_pcs(struct kmem_cache *s, gfp_t gfp, int node)
{
struct slub_percpu_sheaves *pcs;
+ bool node_requested;
void *object;
#ifdef CONFIG_NUMA
- if (static_branch_unlikely(&strict_numa)) {
- if (current->mempolicy)
- return NULL;
+ if (static_branch_unlikely(&strict_numa) &&
+ node == NUMA_NO_NODE) {
+
+ struct mempolicy *mpol = current->mempolicy;
+
+ if (mpol) {
+ /*
+ * Special BIND rule support. If the local node
+ * is in the permitted set, do not redirect
+ * to a particular node.
+ * Otherwise, apply the memory policy to get
+ * the node we need to allocate on.
+ */
+ if (mpol->mode != MPOL_BIND ||
+ !node_isset(numa_mem_id(), mpol->nodes))
+
+ node = mempolicy_slab_node();
+ }
}
#endif
+ node_requested = IS_ENABLED(CONFIG_NUMA) && node != NUMA_NO_NODE;
+
+ /*
+ * We assume the percpu sheaves contain only local objects, although
+ * that is not completely guaranteed, so we verify later.
+ */
+ if (unlikely(node_requested && node != numa_mem_id()))
+ return NULL;
+
if (!local_trylock(&s->cpu_sheaves->lock))
return NULL;
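
The hunk above redirects a sheaf allocation according to the task's memory policy only when strict_numa is enabled and no explicit node was requested: an MPOL_BIND policy that permits the local node keeps the request node-agnostic, anything else takes the node chosen by mempolicy_slab_node(). The following standalone C sketch models just that decision; the names (pick_slab_node, struct mem_policy, NODE_ANY) are made up for illustration, and mempolicy_slab_node() is crudely modelled as "lowest node in the policy's mask", which the real helper is far more careful about.

/*
 * Standalone model of the node-selection logic above. All names here
 * are illustrative; they are not kernel interfaces.
 */
#include <stdbool.h>
#include <stdio.h>

#define NODE_ANY	(-1)	/* stands in for NUMA_NO_NODE */

enum policy_mode { POLICY_DEFAULT, POLICY_BIND };

struct mem_policy {
	enum policy_mode mode;
	unsigned long nodes;	/* bitmask of allowed nodes */
};

static bool node_in_set(int node, unsigned long mask)
{
	return mask & (1UL << node);
}

/* Only consulted when no explicit node was requested, as in the hunk. */
static int pick_slab_node(const struct mem_policy *pol, int local_node)
{
	if (!pol)
		return NODE_ANY;

	/*
	 * BIND policy that permits the local node: keep "no particular
	 * node", so a local percpu cache can still serve the request.
	 */
	if (pol->mode == POLICY_BIND && node_in_set(local_node, pol->nodes))
		return NODE_ANY;

	/*
	 * Otherwise follow the policy; crudely modelled as the lowest node
	 * in the mask, or no preference if the mask is empty.
	 */
	for (int n = 0; n < (int)(8 * sizeof(pol->nodes)); n++)
		if (node_in_set(n, pol->nodes))
			return n;

	return NODE_ANY;
}

int main(void)
{
	struct mem_policy bind_local  = { POLICY_BIND, 1UL << 0 };
	struct mem_policy bind_remote = { POLICY_BIND, 1UL << 1 };

	printf("%d\n", pick_slab_node(&bind_local, 0));		/* -1: stay local */
	printf("%d\n", pick_slab_node(&bind_remote, 0));	/* 1: redirect */
	return 0;
}
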
@@ -4905,7 +4930,21 @@ void *alloc_from_pcs(struct kmem_cache *s, gfp_t gfp)
return NULL;
}
- object = pcs->main->objects[--pcs->main->size];
+ object = pcs->main->objects[pcs->main->size - 1];
+
+ if (unlikely(node_requested)) {
+ /*
+ * Verify that the object came from the node we want. It might not
+ * have, due to cpu migration during an unlocked part of the
+ * current allocation or of a previous freeing process.
+ */
+ if (folio_nid(virt_to_folio(object)) != node) {
+ local_unlock(&s->cpu_sheaves->lock);
+ return NULL;
+ }
+ }
+
+ pcs->main->size--;
local_unlock(&s->cpu_sheaves->lock);
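
The hunk above changes the sheaf fast path from an unconditional pop to peek, verify, then pop: the top object is only consumed once it is confirmed to sit on the requested node, so a rejected object stays cached for node-agnostic callers. Below is a standalone sketch of that control flow, using a pthread trylock in place of local_trylock() and made-up names (struct sheaf, sheaf_alloc_node, obj_node); it models the pattern, not the SLUB data structures.

/*
 * Standalone model of the peek -> verify node -> pop pattern above.
 * Names are illustrative, not kernel interfaces.
 */
#include <pthread.h>
#include <stdio.h>

#define SHEAF_CAPACITY	32
#define NODE_ANY	(-1)

struct obj {
	int nid;	/* stand-in for folio_nid(virt_to_folio(obj)) */
};

struct sheaf {
	pthread_mutex_t lock;
	unsigned int size;
	struct obj *objects[SHEAF_CAPACITY];
};

static int obj_node(const struct obj *o)
{
	return o->nid;
}

/*
 * Returns a cached object, or NULL if the cache is contended, empty,
 * or its top object is not on the requested node. On rejection the
 * object is left in place for callers that do not care about the node.
 */
static struct obj *sheaf_alloc_node(struct sheaf *sheaf, int node)
{
	struct obj *object;

	if (pthread_mutex_trylock(&sheaf->lock))
		return NULL;

	if (sheaf->size == 0) {
		pthread_mutex_unlock(&sheaf->lock);
		return NULL;
	}

	/* Peek first; only commit the pop if the node check passes. */
	object = sheaf->objects[sheaf->size - 1];

	if (node != NODE_ANY && obj_node(object) != node) {
		pthread_mutex_unlock(&sheaf->lock);
		return NULL;
	}

	sheaf->size--;
	pthread_mutex_unlock(&sheaf->lock);
	return object;
}

int main(void)
{
	static struct sheaf s = { .lock = PTHREAD_MUTEX_INITIALIZER };
	static struct obj local_obj = { .nid = 0 };

	s.objects[s.size++] = &local_obj;

	/* Node 1 requested, cached object is on node 0: rejected, kept. */
	printf("%p\n", (void *)sheaf_alloc_node(&s, 1));
	/* Node 0 requested: served and popped. */
	printf("%p\n", (void *)sheaf_alloc_node(&s, 0));
	return 0;
}
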
@@ -5005,8 +5044,8 @@ static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list
if (unlikely(object))
goto out;
- if (s->cpu_sheaves && node == NUMA_NO_NODE)
- object = alloc_from_pcs(s, gfpflags);
+ if (s->cpu_sheaves)
+ object = alloc_from_pcs(s, gfpflags, node);
if (!object)
object = __slab_alloc_node(s, gfpflags, node, addr, orig_size);
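
With the caller hunk above, kmem_cache_alloc_node() no longer skips the percpu sheaves just because an explicit node was passed: as far as this patch goes, a hint for the local node can be served from the sheaf, while a remote hint, or a cached object that turns out to be remote, falls through to __slab_alloc_node(). A rough usage sketch follows; the cache, type and function names are invented, and whether a given cache actually has sheaves depends on how it was created, which the sketch glosses over.

#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/topology.h>

/* Hypothetical object type and cache, for illustration only. */
struct my_item {
	int payload;
};

static struct kmem_cache *my_item_cache;

static int my_item_demo(void)
{
	struct my_item *a, *b;

	my_item_cache = kmem_cache_create("my_item", sizeof(struct my_item),
					  0, 0, NULL);
	if (!my_item_cache)
		return -ENOMEM;

	/* No node constraint: may be served from the percpu sheaf. */
	a = kmem_cache_alloc(my_item_cache, GFP_KERNEL);

	/*
	 * Explicit node hint: with this patch the sheaf is still tried when
	 * the hint is the local node; a remote hint (or a remote object
	 * found in the sheaf) falls back to the regular allocation path.
	 */
	b = kmem_cache_alloc_node(my_item_cache, GFP_KERNEL, numa_mem_id());

	if (a)
		kmem_cache_free(my_item_cache, a);
	if (b)
		kmem_cache_free(my_item_cache, b);
	kmem_cache_destroy(my_item_cache);
	return 0;
}
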