author     Linus Torvalds <torvalds@linux-foundation.org>   2017-08-03 14:58:13 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-08-03 14:58:13 -0700
commit     995d03ae266d3f3bec8844d01b6c3ded19b9cc1c (patch)
tree       a98a9fa566b55def6d742578cd078afb21a44ed6 /include
parent     8d3fe85f07a99cbf82f12c486414eda1d186bfa1 (diff)
parent     19ec8e48582670c021e998b9deb88e39a842ff45 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "15 fixes"

[ This does not merge the "fortify: use WARN instead of BUG for now"
  patch, which needs a bit of extra work to build cleanly with all
  configurations. Arnd is on it.  - Linus ]

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  ocfs2: don't clear SGID when inheriting ACLs
  mm: allow page_cache_get_speculative in interrupt context
  userfaultfd: non-cooperative: flush event_wqh at release time
  ipc: add missing container_of()s for randstruct
  cpuset: fix a deadlock due to incomplete patching of cpusets_enabled()
  userfaultfd_zeropage: return -ENOSPC in case mm has gone
  mm: take memory hotplug lock within numa_zonelist_order_handler()
  mm/page_io.c: fix oops during block io poll in swapin path
  zram: do not free pool->size_class
  kthread: fix documentation build warning
  kasan: avoid -Wmaybe-uninitialized warning
  userfaultfd: non-cooperative: notify about unmap of destination during mremap
  mm, mprotect: flush TLB if potentially racing with a parallel reclaim leaving stale TLB entries
  pid: kill pidhash_size in pidhash_init()
  mm/hugetlb.c: __get_user_pages ignores certain follow_hugetlb_page errors
Diffstat (limited to 'include')
-rw-r--r--   include/linux/cpuset.h    19
-rw-r--r--   include/linux/kthread.h    2
-rw-r--r--   include/linux/mm_types.h   4
-rw-r--r--   include/linux/pagemap.h    2
4 files changed, 22 insertions, 5 deletions
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 119a3f9604b0..898cfe2eeb42 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -18,6 +18,19 @@
#ifdef CONFIG_CPUSETS
+/*
+ * Static branch rewrites can happen in an arbitrary order for a given
+ * key. In code paths where we need to loop with read_mems_allowed_begin() and
+ * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
+ * to ensure that begin() always gets rewritten before retry() in the
+ * disabled -> enabled transition. If not, then if local irqs are disabled
+ * around the loop, we can deadlock since retry() would always be
+ * comparing the latest value of the mems_allowed seqcount against 0 as
+ * begin() still would see cpusets_enabled() as false. The enabled -> disabled
+ * transition should happen in reverse order for the same reasons (want to stop
+ * looking at real value of mems_allowed.sequence in retry() first).
+ */
+extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
@@ -32,12 +45,14 @@ static inline int nr_cpusets(void)
static inline void cpuset_inc(void)
{
+ static_branch_inc(&cpusets_pre_enable_key);
static_branch_inc(&cpusets_enabled_key);
}
static inline void cpuset_dec(void)
{
static_branch_dec(&cpusets_enabled_key);
+ static_branch_dec(&cpusets_pre_enable_key);
}
extern int cpuset_init(void);
@@ -115,7 +130,7 @@ extern void cpuset_print_current_mems_allowed(void);
*/
static inline unsigned int read_mems_allowed_begin(void)
{
- if (!cpusets_enabled())
+ if (!static_branch_unlikely(&cpusets_pre_enable_key))
return 0;
return read_seqcount_begin(&current->mems_allowed_seq);
@@ -129,7 +144,7 @@ static inline unsigned int read_mems_allowed_begin(void)
*/
static inline bool read_mems_allowed_retry(unsigned int seq)
{
- if (!cpusets_enabled())
+ if (!static_branch_unlikely(&cpusets_enabled_key))
return false;
return read_seqcount_retry(&current->mems_allowed_seq, seq);
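
The split between the two keys, with begin() testing cpusets_pre_enable_key and retry() testing cpusets_enabled_key, matters to callers that loop on the mems_allowed seqcount. A minimal sketch of that caller pattern (not part of this commit), loosely modelled on page-allocator style users; try_allocation() and the surrounding function are made up for illustration:

static struct page *alloc_with_stable_mems(gfp_t gfp_mask, unsigned int order)
{
	struct page *page;
	unsigned int cpuset_mems_cookie;

retry_cpuset:
	/* Samples current->mems_allowed_seq once pre_enable_key is on */
	cpuset_mems_cookie = read_mems_allowed_begin();

	page = try_allocation(gfp_mask, order);	/* hypothetical helper */

	/*
	 * A failure may be spurious if mems_allowed changed while we were
	 * allocating.  Because pre_enable_key is flipped before enabled_key,
	 * begin() can never have returned a stale 0 cookie that retry() would
	 * then compare against a live seqcount, which is the deadlock the
	 * comment in cpuset.h describes.
	 */
	if (!page && read_mems_allowed_retry(cpuset_mems_cookie))
		goto retry_cpuset;

	return page;
}
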
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 4fec8b775895..82e197eeac91 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -15,7 +15,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
* @threadfn: the function to run in the thread
* @data: data pointer for @threadfn()
* @namefmt: printf-style format string for the thread name
- * @...: arguments for @namefmt.
+ * @arg...: arguments for @namefmt.
*
* This macro will create a kthread on the current node, leaving it in
* the stopped state. This is just a helper for kthread_create_on_node();
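
For reference, typical use of the macro this comment documents; thread_fn, my_data and id below are illustrative names only:

	struct task_struct *tsk;

	tsk = kthread_create(thread_fn, my_data, "mythread/%d", id);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);
	wake_up_process(tsk);	/* kthread_create() leaves the thread stopped */
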
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index ff151814a02d..7f384bb62d8e 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -495,6 +495,10 @@ struct mm_struct {
*/
bool tlb_flush_pending;
#endif
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+ /* See flush_tlb_batched_pending() */
+ bool tlb_flush_batched;
+#endif
struct uprobes_state uprobes_state;
#ifdef CONFIG_HUGETLB_PAGE
atomic_long_t hugetlb_usage;
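
The new tlb_flush_batched flag is set by the batched-unmap path in reclaim and consumed before page table updates that could race with a deferred flush. A rough sketch of the consumer side, assuming the rest of the series wires it up in mm/rmap.c; this is only meant to show how the field is intended to be used:

void flush_tlb_batched_pending(struct mm_struct *mm)
{
	if (mm->tlb_flush_batched) {
		flush_tlb_mm(mm);

		/* Keep the clear ordered after the flush above */
		barrier();
		mm->tlb_flush_batched = false;
	}
}
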
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index baa9344dcd10..79b36f57c3ba 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -163,8 +163,6 @@ void release_pages(struct page **pages, int nr, bool cold);
*/
static inline int page_cache_get_speculative(struct page *page)
{
- VM_BUG_ON(in_interrupt());
-
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
VM_BUG_ON(!in_atomic() && !irqs_disabled());
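
Dropping VM_BUG_ON(in_interrupt()) lets the swap-in block io poll path take a speculative reference from interrupt context. For context, a simplified sketch of the lockless-lookup protocol this helper supports, loosely modelled on find_get_page() in this era of the tree; the function and locals are illustrative:

static struct page *lookup_page_lockless(struct address_space *mapping,
					 pgoff_t offset)
{
	struct page *page;

	rcu_read_lock();
repeat:
	page = radix_tree_lookup(&mapping->page_tree, offset);
	if (page) {
		/* Fails if the refcount already hit zero: page is being freed */
		if (!page_cache_get_speculative(page))
			goto repeat;

		/* The page may have been freed and reused; recheck the slot */
		if (unlikely(page != radix_tree_lookup(&mapping->page_tree,
						       offset))) {
			put_page(page);
			goto repeat;
		}
	}
	rcu_read_unlock();

	return page;
}
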