Diffstat (limited to 'include/linux/sched/mm.h')
-rw-r--r--  include/linux/sched/mm.h | 25
1 file changed, 17 insertions(+), 8 deletions(-)
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 928a626725e6..0e1d73955fa5 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -140,7 +140,7 @@ static inline bool mmget_not_zero(struct mm_struct *mm)
/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
-#ifdef CONFIG_MMU
+#if defined(CONFIG_MMU) || defined(CONFIG_FUTEX_PRIVATE_HASH)
/* same as above but performs the slow path from the async context. Can
* be called from the atomic context as well
*/
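
With this hunk, mmput_async() is declared not only on CONFIG_MMU kernels but also whenever
CONFIG_FUTEX_PRIVATE_HASH is set, presumably so the private futex hash code can drop mm
references from contexts that cannot sleep even on !MMU builds. A minimal sketch of the kind
of caller mmput_async() exists for (all names below are invented for illustration):

/*
 * Illustrative only: dropping an mm reference from a context that must
 * not sleep.  mmput() can take the __mmput() slow path and sleep, so an
 * RCU callback (softirq context) defers the drop through mmput_async().
 */
#include <linux/rcupdate.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

struct mm_holder {
	struct mm_struct *mm;
	struct rcu_head rcu;
};

static void mm_holder_free_rcu(struct rcu_head *head)
{
	struct mm_holder *h = container_of(head, struct mm_holder, rcu);

	mmput_async(h->mm);	/* calling mmput() here could sleep */
	kfree(h);
}

static void mm_holder_put(struct mm_holder *h)
{
	call_rcu(&h->rcu, mm_holder_free_rcu);
}
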
@@ -178,7 +178,7 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
#endif
extern void arch_pick_mmap_layout(struct mm_struct *mm,
- struct rlimit *rlim_stack);
+ const struct rlimit *rlim_stack);
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
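
The rlim_stack parameter becomes const because the layout code only reads the stack limit.
A hedged sketch of a call site passing a read-only snapshot; the helper name is invented
(the real caller is exec, via bprm->rlim_stack), and the locking normally taken around
reading signal->rlim is omitted:

#include <linux/resource.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>

static void demo_pick_layout(struct mm_struct *mm)
{
	/* Read-only snapshot of the stack rlimit; fine with the const parameter. */
	const struct rlimit rlim_stack = current->signal->rlim[RLIMIT_STACK];

	arch_pick_mmap_layout(mm, &rlim_stack);
}
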
@@ -189,12 +189,11 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags, vm_flags_t);
-unsigned long mm_get_unmapped_area(struct mm_struct *mm, struct file *filp,
- unsigned long addr, unsigned long len,
- unsigned long pgoff, unsigned long flags);
+unsigned long mm_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
-unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm,
- struct file *filp,
+unsigned long mm_get_unmapped_area_vmflags(struct file *filp,
unsigned long addr,
unsigned long len,
unsigned long pgoff,
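
The mm_struct argument disappears from both lookup helpers; callers that passed current->mm
simply drop it, the search presumably continuing to operate on the calling task's mm. An
illustrative call-site adjustment, using a made-up file_operations hook:

#include <linux/fs.h>
#include <linux/sched/mm.h>

static unsigned long demo_get_unmapped_area(struct file *filp,
					    unsigned long addr,
					    unsigned long len,
					    unsigned long pgoff,
					    unsigned long flags)
{
	/* Old form: mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags); */
	return mm_get_unmapped_area(filp, addr, len, pgoff, flags);
}

static const struct file_operations demo_fops = {
	.get_unmapped_area	= demo_get_unmapped_area,
};
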
@@ -211,7 +210,7 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long flags, vm_flags_t vm_flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
- struct rlimit *rlim_stack) {}
+ const struct rlimit *rlim_stack) {}
#endif
static inline bool in_vfork(struct task_struct *tsk)
@@ -318,6 +317,9 @@ static inline void might_alloc(gfp_t gfp_mask)
fs_reclaim_acquire(gfp_mask);
fs_reclaim_release(gfp_mask);
+ if (current->flags & PF_MEMALLOC)
+ return;
+
might_sleep_if(gfpflags_allow_blocking(gfp_mask));
}
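
might_alloc() lets a function that only allocates on a slow path (or defers the allocation)
still get the fs_reclaim lockdep annotation and the might-sleep check on every call. The new
test returns before might_sleep_if(), so a task with PF_MEMALLOC set (i.e. one currently
performing reclaim) skips the sleep check. A made-up caller showing the usual pattern:

#include <linux/atomic.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

struct demo_cache {
	void *cached;		/* one-element fast-path cache */
	size_t size;
};

static void *demo_get_buffer(struct demo_cache *c, gfp_t gfp)
{
	void *obj;

	/* Annotate the allocation even when the cached fast path is taken. */
	might_alloc(gfp);

	obj = xchg(&c->cached, NULL);
	if (obj)
		return obj;

	return kmalloc(c->size, gfp);
}
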
@@ -531,6 +533,13 @@ enum {
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
+ /*
+ * The atomic_read() below prevents CSE. The following should
+ * help the compiler generate more efficient code on architectures
+ * where sync_core_before_usermode() is a no-op.
+ */
+ if (!IS_ENABLED(CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE))
+ return;
if (current->mm != mm)
return;
if (likely(!(atomic_read(&mm->membarrier_state) &
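
The early return added above relies on IS_ENABLED() expanding to a compile-time 0 or 1, so on
architectures without CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE the compiler can discard the
rest of the function while the code after it still gets type-checked (unlike an #ifdef). The
same idiom in isolation, with an invented option and helper name:

#include <linux/kconfig.h>

void demo_do_expensive_flush(void);	/* hypothetical helper */

static inline void demo_flush_if_needed(void)
{
	if (!IS_ENABLED(CONFIG_DEMO_NEEDS_FLUSH))	/* constant 0 or 1 */
		return;

	demo_do_expensive_flush();	/* dropped entirely when the option is off */
}
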