diff options
-rw-r--r--  arch/ia64/mm/fault.c                   |  4
-rw-r--r--  arch/x86/mm/fault.c                    |  2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c  |  2
-rw-r--r--  include/linux/mm_types.h               |  2
-rw-r--r--  include/linux/mmap_lock.h              | 38
-rw-r--r--  mm/gup.c                               |  2
-rw-r--r--  mm/memory.c                            |  2
-rw-r--r--  mm/mmap.c                              |  4
-rw-r--r--  mm/mmu_notifier.c                      |  2
9 files changed, 29 insertions, 29 deletions
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index 245545b43a4c..e9ce969c8b73 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -74,8 +74,8 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re  	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)  		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT)); -	/* mmap_sem is performance critical.... */ -	prefetchw(&mm->mmap_sem); +	/* mmap_lock is performance critical.... */ +	prefetchw(&mm->mmap_lock);  	/*  	 * If we're in an interrupt or have no user context, we must not take the fault.. diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 8e7e4c2bd527..c23bcd027ae1 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1359,7 +1359,7 @@ dotraplinkage void  do_page_fault(struct pt_regs *regs, unsigned long hw_error_code,  		unsigned long address)  { -	prefetchw(&current->mm->mmap_sem); +	prefetchw(&current->mm->mmap_lock);  	/*  	 * KVM has two types of events that are, logically, interrupts, but  	 * are unfortunately delivered using the #PF vector.  
These events are diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index dc9ef302f517..701f3995f621 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -661,7 +661,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)  	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;  	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT; -	might_lock_read(&current->mm->mmap_sem); +	might_lock_read(&current->mm->mmap_lock);  	if (userptr->mm != current->mm)  		return -EPERM; diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index ef6d3aface8a..b6639b30a83b 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -440,7 +440,7 @@ struct mm_struct {  		spinlock_t page_table_lock; /* Protects page tables and some  					     * counters  					     */ -		struct rw_semaphore mmap_sem; +		struct rw_semaphore mmap_lock;  		struct list_head mmlist; /* List of maybe swapped mm's.	
These  					  * are globally strung together off diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h index 43ef914e6468..0707671851a8 100644 --- a/include/linux/mmap_lock.h +++ b/include/linux/mmap_lock.h @@ -4,67 +4,67 @@  #include <linux/mmdebug.h>  #define MMAP_LOCK_INITIALIZER(name) \ -	.mmap_sem = __RWSEM_INITIALIZER((name).mmap_sem), +	.mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),  static inline void mmap_init_lock(struct mm_struct *mm)  { -	init_rwsem(&mm->mmap_sem); +	init_rwsem(&mm->mmap_lock);  }  static inline void mmap_write_lock(struct mm_struct *mm)  { -	down_write(&mm->mmap_sem); +	down_write(&mm->mmap_lock);  }  static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)  { -	down_write_nested(&mm->mmap_sem, subclass); +	down_write_nested(&mm->mmap_lock, subclass);  }  static inline int mmap_write_lock_killable(struct mm_struct *mm)  { -	return down_write_killable(&mm->mmap_sem); +	return down_write_killable(&mm->mmap_lock);  }  static inline bool mmap_write_trylock(struct mm_struct *mm)  { -	return down_write_trylock(&mm->mmap_sem) != 0; +	return down_write_trylock(&mm->mmap_lock) != 0;  }  static inline void mmap_write_unlock(struct mm_struct *mm)  { -	up_write(&mm->mmap_sem); +	up_write(&mm->mmap_lock);  }  static inline void mmap_write_downgrade(struct mm_struct *mm)  { -	downgrade_write(&mm->mmap_sem); +	downgrade_write(&mm->mmap_lock);  }  static inline void mmap_read_lock(struct mm_struct *mm)  { -	down_read(&mm->mmap_sem); +	down_read(&mm->mmap_lock);  }  static inline int mmap_read_lock_killable(struct mm_struct *mm)  { -	return down_read_killable(&mm->mmap_sem); +	return down_read_killable(&mm->mmap_lock);  }  static inline bool mmap_read_trylock(struct mm_struct *mm)  { -	return down_read_trylock(&mm->mmap_sem) != 0; +	return down_read_trylock(&mm->mmap_lock) != 0;  }  static inline void mmap_read_unlock(struct mm_struct *mm)  { -	up_read(&mm->mmap_sem); +	up_read(&mm->mmap_lock);  }  static inline 
bool mmap_read_trylock_non_owner(struct mm_struct *mm)  { -	if (down_read_trylock(&mm->mmap_sem)) { -		rwsem_release(&mm->mmap_sem.dep_map, _RET_IP_); +	if (down_read_trylock(&mm->mmap_lock)) { +		rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_);  		return true;  	}  	return false; @@ -72,19 +72,19 @@ static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm)  static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)  { -	up_read_non_owner(&mm->mmap_sem); +	up_read_non_owner(&mm->mmap_lock);  }  static inline void mmap_assert_locked(struct mm_struct *mm)  { -	lockdep_assert_held(&mm->mmap_sem); -	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm); +	lockdep_assert_held(&mm->mmap_lock); +	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);  }  static inline void mmap_assert_write_locked(struct mm_struct *mm)  { -	lockdep_assert_held_write(&mm->mmap_sem); -	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm); +	lockdep_assert_held_write(&mm->mmap_lock); +	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);  }  #endif /* _LINUX_MMAP_LOCK_H */ diff --git a/mm/gup.c b/mm/gup.c --- a/mm/gup.c +++ b/mm/gup.c @@ -2750,7 +2750,7 @@ static int internal_get_user_pages_fast(unsigned long start, int nr_pages,  		return -EINVAL;  	if (!(gup_flags & FOLL_FAST_ONLY)) -		might_lock_read(&current->mm->mmap_sem); +		might_lock_read(&current->mm->mmap_lock);  	start = untagged_addr(start) & PAGE_MASK;  	addr = start; diff --git a/mm/memory.c b/mm/memory.c index 533293faaaf5..823982a8f0b0 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4804,7 +4804,7 @@ void __might_fault(const char *file, int line)  	__might_sleep(file, line, 0);  #if defined(CONFIG_DEBUG_ATOMIC_SLEEP)  	if (current->mm) -		might_lock_read(&current->mm->mmap_sem); +		might_lock_read(&current->mm->mmap_lock);  #endif  }  EXPORT_SYMBOL(__might_fault); diff --git a/mm/mmap.c b/mm/mmap.c index 7ed7cfca451e..a28778da76a3 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -3474,7 +3474,7 @@ static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)  		 * The LSB of head.next can't 
change from under us  		 * because we hold the mm_all_locks_mutex.  		 */ -		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem); +		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);  		/*  		 * We can safely modify head.next after taking the  		 * anon_vma->root->rwsem. If some other vma in this mm shares @@ -3504,7 +3504,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)  		 */  		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))  			BUG(); -		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_sem); +		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);  	}  } diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c index 24eb9d1ed0a7..2f348b6c9c9a 100644 --- a/mm/mmu_notifier.c +++ b/mm/mmu_notifier.c @@ -983,7 +983,7 @@ int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,  	struct mmu_notifier_subscriptions *subscriptions;  	int ret; -	might_lock(&mm->mmap_sem); +	might_lock(&mm->mmap_lock);  	subscriptions = smp_load_acquire(&mm->notifier_subscriptions);  	if (!subscriptions || !subscriptions->has_itree) {  | 
