Diffstat (limited to 'kernel/cgroup/cpuset.c')
-rw-r--r--  kernel/cgroup/cpuset.c  |  357
1 file changed, 221 insertions(+), 136 deletions(-)
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 4aaad07b0bd1..6e6eb09b8db6 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -82,14 +82,18 @@ static cpumask_var_t subpartitions_cpus;
static cpumask_var_t isolated_cpus;
/*
+ * isolated_cpus updating flag (protected by cpuset_mutex)
+ * Set if isolated_cpus is going to be updated in the current
+ * cpuset_mutex critical section.
+ */
+static bool isolated_cpus_updating;
+
+/*
* Housekeeping (HK_TYPE_DOMAIN) CPUs at boot
*/
static cpumask_var_t boot_hk_cpus;
static bool have_boot_isolcpus;
-/* List of remote partition root children */
-static struct list_head remote_children;
-
/*
* A flag to force sched domain rebuild at the end of an operation.
* It can be set in
@@ -212,7 +216,7 @@ static struct cpuset top_cpuset = {
BIT(CS_MEM_EXCLUSIVE) | BIT(CS_SCHED_LOAD_BALANCE),
.partition_root_state = PRS_ROOT,
.relax_domain_level = -1,
- .remote_sibling = LIST_HEAD_INIT(top_cpuset.remote_sibling),
+ .remote_partition = false,
};
/*
@@ -352,33 +356,55 @@ static inline bool is_in_v2_mode(void)
(cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
}
+static inline bool cpuset_is_populated(struct cpuset *cs)
+{
+ lockdep_assert_held(&cpuset_mutex);
+
+	/* Cpusets in the process of attaching should be considered populated */
+ return cgroup_is_populated(cs->css.cgroup) ||
+ cs->attach_in_progress;
+}
+
/**
* partition_is_populated - check if partition has tasks
* @cs: partition root to be checked
* @excluded_child: a child cpuset to be excluded in task checking
* Return: true if there are tasks, false otherwise
*
- * It is assumed that @cs is a valid partition root. @excluded_child should
- * be non-NULL when this cpuset is going to become a partition itself.
+ * @cs should be a valid partition root or going to become a partition root.
+ * @excluded_child should be non-NULL when this cpuset is going to become a
+ * partition itself.
+ *
+ * Note that a remote partition is not allowed underneath a valid local
+ * or remote partition. So if a non-partition root child is populated,
+ * the whole partition is considered populated.
*/
static inline bool partition_is_populated(struct cpuset *cs,
struct cpuset *excluded_child)
{
- struct cgroup_subsys_state *css;
- struct cpuset *child;
+ struct cpuset *cp;
+ struct cgroup_subsys_state *pos_css;
- if (cs->css.cgroup->nr_populated_csets)
+ /*
+	 * We cannot call cpuset_is_populated(cs) directly, as
+ * nr_populated_domain_children may include populated
+ * csets from descendants that are partitions.
+ */
+ if (cs->css.cgroup->nr_populated_csets ||
+ cs->attach_in_progress)
return true;
- if (!excluded_child && !cs->nr_subparts)
- return cgroup_is_populated(cs->css.cgroup);
rcu_read_lock();
- cpuset_for_each_child(child, css, cs) {
- if (child == excluded_child)
+ cpuset_for_each_descendant_pre(cp, pos_css, cs) {
+ if (cp == cs || cp == excluded_child)
continue;
- if (is_partition_valid(child))
+
+ if (is_partition_valid(cp)) {
+ pos_css = css_rightmost_descendant(pos_css);
continue;
- if (cgroup_is_populated(child->css.cgroup)) {
+ }
+
+ if (cpuset_is_populated(cp)) {
rcu_read_unlock();
return true;
}
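To make the pruning walk above concrete, here is a minimal userspace sketch (not kernel code): a hand-rolled tree stands in for the css hierarchy, and skipping a valid-partition subtree models the css_rightmost_descendant() jump. All names here (struct node, subtree_populated, ...) are invented for illustration.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	bool has_tasks;		/* models cpuset_is_populated() */
	bool valid_partition;	/* models is_partition_valid() */
	struct node *child[4];
};

static bool subtree_populated(const struct node *n, const struct node *excluded)
{
	size_t i;

	if (!n || n == excluded)
		return false;
	if (n->valid_partition)	/* prune: a valid partition hides its subtree */
		return false;
	if (n->has_tasks)
		return true;
	for (i = 0; i < 4; i++)
		if (subtree_populated(n->child[i], excluded))
			return true;
	return false;
}

static bool partition_populated(const struct node *root, const struct node *excluded)
{
	size_t i;

	if (root->has_tasks)	/* the root itself always counts */
		return true;
	for (i = 0; i < 4; i++)
		if (subtree_populated(root->child[i], excluded))
			return true;
	return false;
}

int main(void)
{
	struct node leaf = { .has_tasks = true };
	struct node part = { .valid_partition = true, .child = { &leaf } };
	struct node root = { .child = { &part } };

	/* The populated leaf hides under a valid child partition, so
	 * root's partition is not considered populated: prints 0. */
	printf("%d\n", partition_populated(&root, NULL));
	return 0;
}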
@@ -663,7 +689,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
* be changed to have empty cpus_allowed or mems_allowed.
*/
ret = -ENOSPC;
- if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
+ if (cpuset_is_populated(cur)) {
if (!cpumask_empty(cur->cpus_allowed) &&
cpumask_empty(trial->cpus_allowed))
goto out;
@@ -1302,7 +1328,6 @@ static void reset_partition_data(struct cpuset *cs)
lockdep_assert_held(&callback_lock);
- cs->nr_subparts = 0;
if (cpumask_empty(cs->exclusive_cpus)) {
cpumask_clear(cs->effective_xcpus);
if (is_cpu_exclusive(cs))
@@ -1325,6 +1350,8 @@ static void isolated_cpus_update(int old_prs, int new_prs, struct cpumask *xcpus
cpumask_or(isolated_cpus, isolated_cpus, xcpus);
else
cpumask_andnot(isolated_cpus, isolated_cpus, xcpus);
+
+ isolated_cpus_updating = true;
}
/*
@@ -1332,15 +1359,12 @@ static void isolated_cpus_update(int old_prs, int new_prs, struct cpumask *xcpus
* @new_prs: new partition_root_state
* @parent: parent cpuset
* @xcpus: exclusive CPUs to be added
- * Return: true if isolated_cpus modified, false otherwise
*
* Remote partition if parent == NULL
*/
-static bool partition_xcpus_add(int new_prs, struct cpuset *parent,
+static void partition_xcpus_add(int new_prs, struct cpuset *parent,
struct cpumask *xcpus)
{
- bool isolcpus_updated;
-
WARN_ON_ONCE(new_prs < 0);
lockdep_assert_held(&callback_lock);
if (!parent)
@@ -1350,13 +1374,11 @@ static bool partition_xcpus_add(int new_prs, struct cpuset *parent,
if (parent == &top_cpuset)
cpumask_or(subpartitions_cpus, subpartitions_cpus, xcpus);
- isolcpus_updated = (new_prs != parent->partition_root_state);
- if (isolcpus_updated)
+ if (new_prs != parent->partition_root_state)
isolated_cpus_update(parent->partition_root_state, new_prs,
xcpus);
cpumask_andnot(parent->effective_cpus, parent->effective_cpus, xcpus);
- return isolcpus_updated;
}
/*
@@ -1364,15 +1386,12 @@ static bool partition_xcpus_add(int new_prs, struct cpuset *parent,
* @old_prs: old partition_root_state
* @parent: parent cpuset
* @xcpus: exclusive CPUs to be removed
- * Return: true if isolated_cpus modified, false otherwise
*
* Remote partition if parent == NULL
*/
-static bool partition_xcpus_del(int old_prs, struct cpuset *parent,
+static void partition_xcpus_del(int old_prs, struct cpuset *parent,
struct cpumask *xcpus)
{
- bool isolcpus_updated;
-
WARN_ON_ONCE(old_prs < 0);
lockdep_assert_held(&callback_lock);
if (!parent)
@@ -1381,30 +1400,95 @@ static bool partition_xcpus_del(int old_prs, struct cpuset *parent,
if (parent == &top_cpuset)
cpumask_andnot(subpartitions_cpus, subpartitions_cpus, xcpus);
- isolcpus_updated = (old_prs != parent->partition_root_state);
- if (isolcpus_updated)
+ if (old_prs != parent->partition_root_state)
isolated_cpus_update(old_prs, parent->partition_root_state,
xcpus);
cpumask_and(xcpus, xcpus, cpu_active_mask);
cpumask_or(parent->effective_cpus, parent->effective_cpus, xcpus);
- return isolcpus_updated;
}
-static void update_isolation_cpumasks(bool isolcpus_updated)
+/*
+ * isolated_cpus_can_update - check for isolated & nohz_full conflicts
+ * @add_cpus: cpu mask for cpus that are going to be isolated
+ * @del_cpus: cpu mask for cpus that are no longer isolated, can be NULL
+ * Return: false if there is conflict, true otherwise
+ *
+ * If nohz_full is enabled and we have isolated CPUs, their combination must
+ * still leave at least one housekeeping CPU.
+ *
+ * TBD: Should consider merging this function into
+ * prstate_housekeeping_conflict().
+ */
+static bool isolated_cpus_can_update(struct cpumask *add_cpus,
+ struct cpumask *del_cpus)
{
- int ret;
+ cpumask_var_t full_hk_cpus;
+	bool res = true;
- lockdep_assert_cpus_held();
+ if (!housekeeping_enabled(HK_TYPE_KERNEL_NOISE))
+ return true;
- if (!isolcpus_updated)
+ if (del_cpus && cpumask_weight_and(del_cpus,
+ housekeeping_cpumask(HK_TYPE_KERNEL_NOISE)))
+ return true;
+
+ if (!alloc_cpumask_var(&full_hk_cpus, GFP_KERNEL))
+ return false;
+
+ cpumask_and(full_hk_cpus, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE),
+ housekeeping_cpumask(HK_TYPE_DOMAIN));
+ cpumask_andnot(full_hk_cpus, full_hk_cpus, isolated_cpus);
+ cpumask_and(full_hk_cpus, full_hk_cpus, cpu_active_mask);
+ if (!cpumask_weight_andnot(full_hk_cpus, add_cpus))
+ res = false;
+
+ free_cpumask_var(full_hk_cpus);
+ return res;
+}
+
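To see what the check above admits and rejects, here is a small userspace model, assuming plain 64-bit masks in place of cpumasks and dropping the early return taken when nohz_full housekeeping is disabled; can_isolate() and its parameters are invented for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Model of isolated_cpus_can_update(): isolating the CPUs in add is
 * allowed only if some active CPU remains that is housekeeping for
 * both HK_TYPE_KERNEL_NOISE and HK_TYPE_DOMAIN and is not already
 * isolated, or if the same update also returns a housekeeping CPU.
 */
static bool can_isolate(uint64_t add, uint64_t del, uint64_t hk_noise,
			uint64_t hk_domain, uint64_t isolated, uint64_t active)
{
	uint64_t full_hk;

	if (del & hk_noise)	/* giving a housekeeping CPU back */
		return true;

	full_hk = hk_noise & hk_domain & ~isolated & active;
	return (full_hk & ~add) != 0;
}

int main(void)
{
	/* 4 CPUs; CPU0 is the last full housekeeping CPU. */
	uint64_t hk_noise = 0x1, hk_domain = 0xf, isolated = 0x6, active = 0xf;

	/* Isolating CPU3 still leaves CPU0 housekeeping: prints 1. */
	printf("%d\n", can_isolate(0x8, 0, hk_noise, hk_domain, isolated, active));
	/* Isolating CPU0 would leave no housekeeping CPU: prints 0. */
	printf("%d\n", can_isolate(0x1, 0, hk_noise, hk_domain, isolated, active));
	return 0;
}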
+/*
+ * prstate_housekeeping_conflict - check for partition & housekeeping conflicts
+ * @prstate: partition root state to be checked
+ * @new_cpus: cpu mask
+ * Return: true if there is conflict, false otherwise
+ *
+ * CPUs outside of boot_hk_cpus, if defined, can only be used in an
+ * isolated partition.
+ */
+static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus)
+{
+ if (!have_boot_isolcpus)
+ return false;
+
+ if ((prstate != PRS_ISOLATED) && !cpumask_subset(new_cpus, boot_hk_cpus))
+ return true;
+
+ return false;
+}
+
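And a matching model of the boot-time isolcpus rule above, again with hypothetical 64-bit masks: only an isolated partition may use CPUs outside boot_hk_cpus.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { PRS_ROOT, PRS_ISOLATED };	/* simplified partition states */

static bool hk_conflict(int prstate, uint64_t new_cpus, uint64_t boot_hk,
			bool have_isolcpus)
{
	if (!have_isolcpus)	/* no "isolcpus=" at boot: never a conflict */
		return false;
	/* non-isolated partitions must stay inside boot_hk */
	return prstate != PRS_ISOLATED && (new_cpus & ~boot_hk) != 0;
}

int main(void)
{
	uint64_t boot_hk = 0x3;	/* CPUs 0-1 were housekeeping at boot */

	/* A root partition on CPU2 conflicts: prints 1. */
	printf("%d\n", hk_conflict(PRS_ROOT, 0x4, boot_hk, true));
	/* An isolated partition on CPU2 is fine: prints 0. */
	printf("%d\n", hk_conflict(PRS_ISOLATED, 0x4, boot_hk, true));
	return 0;
}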
+/*
+ * update_isolation_cpumasks - Update external isolation-related CPU masks
+ *
+ * The following external CPU masks will be updated if necessary:
+ * - workqueue unbound cpumask
+ * - timer migration (tmigr) isolated cpumask
+ */
+static void update_isolation_cpumasks(void)
+{
+ int ret;
+
+ if (!isolated_cpus_updating)
return;
+ lockdep_assert_cpus_held();
+
ret = workqueue_unbound_exclude_cpumask(isolated_cpus);
WARN_ON_ONCE(ret < 0);
ret = tmigr_isolated_exclude_cpumask(isolated_cpus);
WARN_ON_ONCE(ret < 0);
+
+ isolated_cpus_updating = false;
}
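The flag-then-flush protocol above can be illustrated with a small pthreads sketch, not kernel code: every mask change under the lock marks the state dirty, and one flush after the change batch propagates it to external consumers. Names and the single-lock granularity are simplifying assumptions (the kernel splits this across cpuset_mutex and callback_lock).

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t isolated_mask;	/* models isolated_cpus */
static bool mask_dirty;		/* models isolated_cpus_updating */

static void mask_update(uint64_t add, uint64_t del)
{
	pthread_mutex_lock(&lock);
	isolated_mask = (isolated_mask | add) & ~del;
	mask_dirty = true;	/* defer propagation */
	pthread_mutex_unlock(&lock);
}

static void mask_flush(void)
{
	if (!mask_dirty)
		return;		/* nothing changed in this batch */
	/* one propagation per batch, like the workqueue/tmigr updates */
	printf("propagating %#llx\n", (unsigned long long)isolated_mask);
	mask_dirty = false;
}

int main(void)
{
	mask_update(0x6, 0);	/* two updates ... */
	mask_update(0x8, 0x2);
	mask_flush();		/* ... one propagation: 0xc */
	return 0;
}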
/**
@@ -1508,7 +1592,7 @@ static int compute_trialcs_excpus(struct cpuset *trialcs, struct cpuset *cs)
static inline bool is_remote_partition(struct cpuset *cs)
{
- return !list_empty(&cs->remote_sibling);
+ return cs->remote_partition;
}
static inline bool is_local_partition(struct cpuset *cs)
@@ -1529,8 +1613,6 @@ static inline bool is_local_partition(struct cpuset *cs)
static int remote_partition_enable(struct cpuset *cs, int new_prs,
struct tmpmasks *tmp)
{
- bool isolcpus_updated;
-
/*
* The user must have sysadmin privilege.
*/
@@ -1552,13 +1634,17 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
if (!cpumask_intersects(tmp->new_cpus, cpu_active_mask) ||
cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus))
return PERR_INVCPUS;
+ if (((new_prs == PRS_ISOLATED) &&
+ !isolated_cpus_can_update(tmp->new_cpus, NULL)) ||
+ prstate_housekeeping_conflict(new_prs, tmp->new_cpus))
+ return PERR_HKEEPING;
spin_lock_irq(&callback_lock);
- isolcpus_updated = partition_xcpus_add(new_prs, NULL, tmp->new_cpus);
- list_add(&cs->remote_sibling, &remote_children);
+ partition_xcpus_add(new_prs, NULL, tmp->new_cpus);
+ cs->remote_partition = true;
cpumask_copy(cs->effective_xcpus, tmp->new_cpus);
spin_unlock_irq(&callback_lock);
- update_isolation_cpumasks(isolcpus_updated);
+ update_isolation_cpumasks();
cpuset_force_rebuild();
cs->prs_err = 0;
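A minimal model of the two cpumask validity checks in this enable path, assuming 64-bit masks; the sysadmin privilege check and the housekeeping checks are left out, and remote_enable_ok() is an invented name.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool remote_enable_ok(uint64_t new_cpus, uint64_t active,
			     uint64_t top_effective)
{
	if (!(new_cpus & active))
		return false;	/* no active CPU requested: PERR_INVCPUS */
	if (!(top_effective & ~new_cpus))
		return false;	/* would consume all of top's effective CPUs */
	return true;
}

int main(void)
{
	uint64_t active = 0xf, top_effective = 0xf;

	/* CPUs 1-2 leave CPUs 0 and 3 to the top cpuset: prints 1. */
	printf("%d\n", remote_enable_ok(0x6, active, top_effective));
	/* Taking all four CPUs starves the top cpuset: prints 0. */
	printf("%d\n", remote_enable_ok(0xf, active, top_effective));
	return 0;
}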
@@ -1581,15 +1667,12 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
*/
static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
{
- bool isolcpus_updated;
-
WARN_ON_ONCE(!is_remote_partition(cs));
WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus));
spin_lock_irq(&callback_lock);
- list_del_init(&cs->remote_sibling);
- isolcpus_updated = partition_xcpus_del(cs->partition_root_state,
- NULL, cs->effective_xcpus);
+ cs->remote_partition = false;
+ partition_xcpus_del(cs->partition_root_state, NULL, cs->effective_xcpus);
if (cs->prs_err)
cs->partition_root_state = -cs->partition_root_state;
else
@@ -1599,7 +1682,7 @@ static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
compute_excpus(cs, cs->effective_xcpus);
reset_partition_data(cs);
spin_unlock_irq(&callback_lock);
- update_isolation_cpumasks(isolcpus_updated);
+ update_isolation_cpumasks();
cpuset_force_rebuild();
/*
@@ -1624,7 +1707,6 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
{
bool adding, deleting;
int prs = cs->partition_root_state;
- int isolcpus_updated = 0;
if (WARN_ON_ONCE(!is_remote_partition(cs)))
return;
@@ -1651,15 +1733,18 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
else if (cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
cpumask_subset(top_cpuset.effective_cpus, tmp->addmask))
cs->prs_err = PERR_NOCPUS;
+ else if ((prs == PRS_ISOLATED) &&
+ !isolated_cpus_can_update(tmp->addmask, tmp->delmask))
+ cs->prs_err = PERR_HKEEPING;
if (cs->prs_err)
goto invalidate;
}
spin_lock_irq(&callback_lock);
if (adding)
- isolcpus_updated += partition_xcpus_add(prs, NULL, tmp->addmask);
+ partition_xcpus_add(prs, NULL, tmp->addmask);
if (deleting)
- isolcpus_updated += partition_xcpus_del(prs, NULL, tmp->delmask);
+ partition_xcpus_del(prs, NULL, tmp->delmask);
/*
* Need to update effective_xcpus and exclusive_cpus now as
* update_sibling_cpumasks() below may iterate back to the same cs.
@@ -1668,7 +1753,7 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
if (xcpus)
cpumask_copy(cs->exclusive_cpus, xcpus);
spin_unlock_irq(&callback_lock);
- update_isolation_cpumasks(isolcpus_updated);
+ update_isolation_cpumasks();
if (adding || deleting)
cpuset_force_rebuild();
@@ -1683,26 +1768,6 @@ invalidate:
remote_partition_disable(cs, tmp);
}
-/*
- * prstate_housekeeping_conflict - check for partition & housekeeping conflicts
- * @prstate: partition root state to be checked
- * @new_cpus: cpu mask
- * Return: true if there is conflict, false otherwise
- *
- * CPUs outside of boot_hk_cpus, if defined, can only be used in an
- * isolated partition.
- */
-static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus)
-{
- if (!have_boot_isolcpus)
- return false;
-
- if ((prstate != PRS_ISOLATED) && !cpumask_subset(new_cpus, boot_hk_cpus))
- return true;
-
- return false;
-}
-
/**
* update_parent_effective_cpumask - update effective_cpus mask of parent cpuset
* @cs: The cpuset that requests change in partition root state
@@ -1749,9 +1814,8 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
int deleting; /* Deleting cpus from parent's effective_cpus */
int old_prs, new_prs;
int part_error = PERR_NONE; /* Partition error? */
- int subparts_delta = 0;
- int isolcpus_updated = 0;
struct cpumask *xcpus = user_xcpus(cs);
+ int parent_prs = parent->partition_root_state;
bool nocpu;
lockdep_assert_held(&cpuset_mutex);
@@ -1774,10 +1838,9 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
if (is_partition_valid(parent))
adding = cpumask_and(tmp->addmask,
xcpus, parent->effective_xcpus);
- if (old_prs > 0) {
+ if (old_prs > 0)
new_prs = -old_prs;
- subparts_delta--;
- }
+
goto write_error;
}
@@ -1816,6 +1879,10 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
if (prstate_housekeeping_conflict(new_prs, xcpus))
return PERR_HKEEPING;
+ if ((new_prs == PRS_ISOLATED) && (new_prs != parent_prs) &&
+ !isolated_cpus_can_update(xcpus, NULL))
+ return PERR_HKEEPING;
+
if (tasks_nocpu_error(parent, cs, xcpus))
return PERR_NOCPUS;
@@ -1832,7 +1899,6 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
WARN_ON_ONCE(!cpumask_subset(tmp->new_cpus, parent->effective_cpus));
deleting = true;
- subparts_delta++;
} else if (cmd == partcmd_disable) {
/*
* May need to add cpus back to parent's effective_cpus
@@ -1843,7 +1909,6 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
if (is_partition_valid(cs)) {
cpumask_copy(tmp->addmask, cs->effective_xcpus);
adding = true;
- subparts_delta--;
}
new_prs = PRS_MEMBER;
} else if (newmask) {
@@ -1871,6 +1936,7 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
*
* For invalid partition:
* delmask = newmask & parent->effective_xcpus
+ * The partition may become valid soon.
*/
if (is_partition_invalid(cs)) {
adding = false;
@@ -1885,6 +1951,23 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
deleting = cpumask_and(tmp->delmask, tmp->delmask,
parent->effective_xcpus);
}
+
+ /*
+		 * TBD: Invalidating a currently valid child root partition may
+		 * still break the isolated_cpus_can_update() rule if the parent
+		 * is an isolated partition.
+ */
+ if (is_partition_valid(cs) && (old_prs != parent_prs)) {
+ if ((parent_prs == PRS_ROOT) &&
+ /* Adding to parent means removing isolated CPUs */
+ !isolated_cpus_can_update(tmp->delmask, tmp->addmask))
+ part_error = PERR_HKEEPING;
+ if ((parent_prs == PRS_ISOLATED) &&
+ /* Adding to parent means adding isolated CPUs */
+ !isolated_cpus_can_update(tmp->addmask, tmp->delmask))
+ part_error = PERR_HKEEPING;
+ }
+
/*
* The new CPUs to be removed from parent's effective CPUs
* must be present.
@@ -1966,17 +2049,13 @@ write_error:
switch (cs->partition_root_state) {
case PRS_ROOT:
case PRS_ISOLATED:
- if (part_error) {
+ if (part_error)
new_prs = -old_prs;
- subparts_delta--;
- }
break;
case PRS_INVALID_ROOT:
case PRS_INVALID_ISOLATED:
- if (!part_error) {
+ if (!part_error)
new_prs = -old_prs;
- subparts_delta++;
- }
break;
}
}
@@ -2005,28 +2084,20 @@ write_error:
* newly deleted ones will be added back to effective_cpus.
*/
spin_lock_irq(&callback_lock);
- if (old_prs != new_prs) {
+ if (old_prs != new_prs)
cs->partition_root_state = new_prs;
- if (new_prs <= 0)
- cs->nr_subparts = 0;
- }
+
/*
 * Adding to parent's effective_cpus means deleting CPUs from cs
* and vice versa.
*/
if (adding)
- isolcpus_updated += partition_xcpus_del(old_prs, parent,
- tmp->addmask);
+ partition_xcpus_del(old_prs, parent, tmp->addmask);
if (deleting)
- isolcpus_updated += partition_xcpus_add(new_prs, parent,
- tmp->delmask);
+ partition_xcpus_add(new_prs, parent, tmp->delmask);
- if (is_partition_valid(parent)) {
- parent->nr_subparts += subparts_delta;
- WARN_ON_ONCE(parent->nr_subparts < 0);
- }
spin_unlock_irq(&callback_lock);
- update_isolation_cpumasks(isolcpus_updated);
+ update_isolation_cpumasks();
if ((old_prs != new_prs) && (cmd == partcmd_update))
update_partition_exclusive_flag(cs, new_prs);
@@ -2108,8 +2179,6 @@ static void compute_partition_effective_cpumask(struct cpuset *cs,
*/
spin_lock_irq(&callback_lock);
make_partition_invalid(child);
- cs->nr_subparts--;
- child->nr_subparts = 0;
spin_unlock_irq(&callback_lock);
notify_partition_change(child, old_prs);
continue;
@@ -2138,7 +2207,6 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
{
struct cpuset *cp;
struct cgroup_subsys_state *pos_css;
- bool need_rebuild_sched_domains = false;
int old_prs, new_prs;
rcu_read_lock();
@@ -2302,15 +2370,12 @@ get_css:
if (!cpumask_empty(cp->cpus_allowed) &&
is_sched_load_balance(cp) &&
(!cpuset_v2() || is_partition_valid(cp)))
- need_rebuild_sched_domains = true;
+ cpuset_force_rebuild();
rcu_read_lock();
css_put(&cp->css);
}
rcu_read_unlock();
-
- if (need_rebuild_sched_domains)
- cpuset_force_rebuild();
}
/**
@@ -2848,21 +2913,19 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
*/
retval = nodelist_parse(buf, trialcs->mems_allowed);
if (retval < 0)
- goto done;
+ return retval;
if (!nodes_subset(trialcs->mems_allowed,
- top_cpuset.mems_allowed)) {
- retval = -EINVAL;
- goto done;
- }
+ top_cpuset.mems_allowed))
+ return -EINVAL;
+
+ /* No change? nothing to do */
+ if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed))
+ return 0;
- if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
- retval = 0; /* Too easy - nothing to do */
- goto done;
- }
retval = validate_change(cs, trialcs);
if (retval < 0)
- goto done;
+ return retval;
check_insane_mems_config(&trialcs->mems_allowed);
@@ -2872,8 +2935,7 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
/* use trialcs->mems_allowed as a temp variable */
update_nodemasks_hier(cs, &trialcs->mems_allowed);
-done:
- return retval;
+ return 0;
}
bool current_cpuset_is_being_rebound(void)
@@ -3011,7 +3073,12 @@ static int update_prstate(struct cpuset *cs, int new_prs)
* A change in load balance state only, no change in cpumasks.
* Need to update isolated_cpus.
*/
- isolcpus_updated = true;
+ if (((new_prs == PRS_ISOLATED) &&
+ !isolated_cpus_can_update(cs->effective_xcpus, NULL)) ||
+ prstate_housekeeping_conflict(new_prs, cs->effective_xcpus))
+ err = PERR_HKEEPING;
+ else
+ isolcpus_updated = true;
} else {
/*
* Switching back to member is always allowed even if it
@@ -3046,7 +3113,7 @@ out:
else if (isolcpus_updated)
isolated_cpus_update(old_prs, new_prs, cs->effective_xcpus);
spin_unlock_irq(&callback_lock);
- update_isolation_cpumasks(isolcpus_updated);
+ update_isolation_cpumasks();
/* Force update if switching back to member & update effective_xcpus */
update_cpumasks_hier(cs, &tmpmask, !new_prs);
@@ -3552,7 +3619,6 @@ cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
__set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
fmeter_init(&cs->fmeter);
cs->relax_domain_level = -1;
- INIT_LIST_HEAD(&cs->remote_sibling);
/* Set CS_MEMORY_MIGRATE for default hierarchy */
if (cpuset_v2())
@@ -3823,7 +3889,6 @@ int __init cpuset_init(void)
nodes_setall(top_cpuset.effective_mems);
fmeter_init(&top_cpuset.fmeter);
- INIT_LIST_HEAD(&remote_children);
BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));
@@ -4024,7 +4089,6 @@ static void cpuset_handle_hotplug(void)
*/
if (!cpumask_empty(subpartitions_cpus)) {
if (cpumask_subset(&new_cpus, subpartitions_cpus)) {
- top_cpuset.nr_subparts = 0;
cpumask_clear(subpartitions_cpus);
} else {
cpumask_andnot(&new_cpus, &new_cpus,
@@ -4119,24 +4183,13 @@ void __init cpuset_init_smp(void)
BUG_ON(!cpuset_migrate_mm_wq);
}
-/**
- * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
- * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
- * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
- *
- * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
- * attached to the specified @tsk. Guaranteed to return some non-empty
- * subset of cpu_active_mask, even if this means going outside the
- * tasks cpuset, except when the task is in the top cpuset.
- **/
-
-void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
+/*
+ * Return cpus_allowed mask from a task's cpuset.
+ */
+static void __cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
{
- unsigned long flags;
struct cpuset *cs;
- spin_lock_irqsave(&callback_lock, flags);
-
cs = task_cs(tsk);
if (cs != &top_cpuset)
guarantee_active_cpus(tsk, pmask);
@@ -4156,7 +4209,39 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
if (!cpumask_intersects(pmask, cpu_active_mask))
cpumask_copy(pmask, possible_mask);
}
+}
+/**
+ * cpuset_cpus_allowed_locked - return cpus_allowed mask from a task's cpuset.
+ * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
+ * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
+ *
+ * Similar to cpuset_cpus_allowed() except that the caller must have acquired
+ * cpuset_mutex.
+ */
+void cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
+{
+ lockdep_assert_held(&cpuset_mutex);
+ __cpuset_cpus_allowed_locked(tsk, pmask);
+}
+
+/**
+ * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
+ * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
+ * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
+ *
+ * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
+ * attached to the specified @tsk. Guaranteed to return some non-empty
+ * subset of cpu_active_mask, even if this means going outside the
+ * task's cpuset, except when the task is in the top cpuset.
+ **/
+
+void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&callback_lock, flags);
+ __cpuset_cpus_allowed_locked(tsk, pmask);
spin_unlock_irqrestore(&callback_lock, flags);
}
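The split above into a lock-assuming body plus a locked and a self-locking wrapper is a common idiom; here is a userspace sketch with a pthreads mutex and a hand-rolled stand-in for lockdep_assert_held(), with all names hypothetical.

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool lock_held;		/* crude stand-in for lockdep */
static int shared_state = 42;

static int __query_locked(void)	/* common body, lock assumed held */
{
	return shared_state;
}

static int query_locked(void)	/* caller already holds the lock */
{
	assert(lock_held);	/* models lockdep_assert_held() */
	return __query_locked();
}

static int query(void)		/* takes and drops the lock itself */
{
	int v;

	pthread_mutex_lock(&lock);
	lock_held = true;
	v = __query_locked();
	lock_held = false;
	pthread_mutex_unlock(&lock);
	return v;
}

int main(void)
{
	printf("%d\n", query());

	pthread_mutex_lock(&lock);
	lock_held = true;
	printf("%d\n", query_locked());
	lock_held = false;
	pthread_mutex_unlock(&lock);
	return 0;
}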