Diffstat (limited to 'kernel/fork.c')
-rw-r--r--  kernel/fork.c | 35 +++++++++++++++++++----------------
1 file changed, 19 insertions(+), 16 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 9f7fe3541897..d8cda4c6de6c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -472,7 +472,7 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
 		 * orig->shared.rb may be modified concurrently, but the clone
 		 * will be reinitialized.
 		 */
-		*new = data_race(*orig);
+		data_race(memcpy(new, orig, sizeof(*new)));
 		INIT_LIST_HEAD(&new->anon_vma_chain);
 		dup_anon_vma_name(orig, new);
 	}
@@ -585,8 +585,8 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 	int retval;
 	unsigned long charge = 0;
 	LIST_HEAD(uf);
-	MA_STATE(old_mas, &oldmm->mm_mt, 0, 0);
-	MA_STATE(mas, &mm->mm_mt, 0, 0);
+	VMA_ITERATOR(old_vmi, oldmm, 0);
+	VMA_ITERATOR(vmi, mm, 0);
 
 	uprobe_start_dup_mmap();
 	if (mmap_write_lock_killable(oldmm)) {
@@ -613,11 +613,11 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 		goto out;
 	khugepaged_fork(mm, oldmm);
 
-	retval = mas_expected_entries(&mas, oldmm->map_count);
+	retval = vma_iter_bulk_alloc(&vmi, oldmm->map_count);
 	if (retval)
 		goto out;
 
-	mas_for_each(&old_mas, mpnt, ULONG_MAX) {
+	for_each_vma(old_vmi, mpnt) {
 		struct file *file;
 
 		if (mpnt->vm_flags & VM_DONTCOPY) {
@@ -659,7 +659,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 			tmp->anon_vma = NULL;
 		} else if (anon_vma_fork(tmp, mpnt))
 			goto fail_nomem_anon_vma_fork;
-		tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT);
+		vm_flags_clear(tmp, VM_LOCKED_MASK);
 		file = tmp->vm_file;
 		if (file) {
 			struct address_space *mapping = file->f_mapping;
@@ -683,11 +683,8 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 			hugetlb_dup_vma_private(tmp);
 
 		/* Link the vma into the MT */
-		mas.index = tmp->vm_start;
-		mas.last = tmp->vm_end - 1;
-		mas_store(&mas, tmp);
-		if (mas_is_err(&mas))
-			goto fail_nomem_mas_store;
+		if (vma_iter_bulk_store(&vmi, tmp))
+			goto fail_nomem_vmi_store;
 
 		mm->map_count++;
 		if (!(tmp->vm_flags & VM_WIPEONFORK))
@@ -702,7 +699,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 	/* a new mm has just been created */
 	retval = arch_dup_mmap(oldmm, mm);
 loop_out:
-	mas_destroy(&mas);
+	vma_iter_free(&vmi);
 out:
 	mmap_write_unlock(mm);
 	flush_tlb_mm(oldmm);
@@ -712,7 +709,7 @@ fail_uprobe_end:
 	uprobe_end_dup_mmap();
 	return retval;
 
-fail_nomem_mas_store:
+fail_nomem_vmi_store:
 	unlink_anon_vmas(tmp);
 fail_nomem_anon_vma_fork:
 	mpol_put(vma_policy(tmp));
@@ -1044,7 +1041,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 #endif
 
 #ifdef CONFIG_BLK_CGROUP
-	tsk->throttle_queue = NULL;
+	tsk->throttle_disk = NULL;
 	tsk->use_memdelay = 0;
 #endif
 
@@ -1060,6 +1057,10 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 	tsk->reported_split_lock = 0;
 #endif
 
+#ifdef CONFIG_SCHED_MM_CID
+	tsk->mm_cid = -1;
+	tsk->mm_cid_active = 0;
+#endif
 	return tsk;
 
 free_stack:
@@ -1169,6 +1170,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 
 	mm->user_ns = get_user_ns(user_ns);
 	lru_gen_init_mm(mm);
+	mm_init_cid(mm);
 	return mm;
 
 fail_pcpu:
@@ -1601,6 +1603,7 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
 
 	tsk->mm = mm;
 	tsk->active_mm = mm;
+	sched_mm_cid_fork(tsk);
 	return 0;
 }
 
@@ -2933,7 +2936,7 @@ static bool clone3_args_valid(struct kernel_clone_args *kargs)
 	 * - make the CLONE_DETACHED bit reusable for clone3
	 * - make the CSIGNAL bits reusable for clone3
 	 */
-	if (kargs->flags & (CLONE_DETACHED | CSIGNAL))
+	if (kargs->flags & (CLONE_DETACHED | (CSIGNAL & (~CLONE_NEWTIME))))
 		return false;
 
 	if ((kargs->flags & (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) ==
@@ -3034,7 +3037,7 @@ void __init mm_cache_init(void)
 	 * dynamically sized based on the maximum CPU number this system
 	 * can have, taking hotplug into account (nr_cpu_ids).
 	 */
-	mm_size = sizeof(struct mm_struct) + cpumask_size();
+	mm_size = sizeof(struct mm_struct) + cpumask_size() + mm_cid_size();
 
 	mm_cachep = kmem_cache_create_usercopy("mm_struct",
 			mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
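
The dup_mmap() hunks above swap raw maple-tree state (MA_STATE, mas_for_each, mas_store) for the higher-level VMA iterator API. Below is a minimal sketch of that iteration pattern, assuming the v6.3-era VMA_ITERATOR/for_each_vma helpers from <linux/mm.h>; the function itself is hypothetical and exists only to show the walk.

#include <linux/mm.h>

/* Hypothetical helper: count mlock()ed VMAs by walking the tree in
 * ascending address order, the same pattern dup_mmap() now uses. */
static unsigned long count_locked_vmas(struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);	/* begin the walk at address 0 */
	struct vm_area_struct *vma;
	unsigned long n = 0;

	mmap_read_lock(mm);		/* the iterator requires mmap_lock */
	for_each_vma(vmi, vma) {
		if (vma->vm_flags & VM_LOCKED)
			n++;
	}
	mmap_read_unlock(mm);

	return n;
}

dup_mmap() uses the bulk variants on top of this pattern: vma_iter_bulk_alloc() preallocates maple-tree nodes for oldmm->map_count entries before the copy loop, vma_iter_bulk_store() consumes them as each new VMA is linked, and vma_iter_free() on the exit path releases whatever was left unused.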
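
The VM_LOCKED hunk similarly moves from open-coded flag arithmetic to the vm_flags modifier helpers. Here is a one-function sketch of the equivalence, assuming the v6.3 vm_flags_clear() wrapper and the VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT) definition from <linux/mm.h>; the function is illustrative only.

#include <linux/mm.h>

/* Illustrative only: drop the mlock state from a VMA via the wrapper. */
static void clear_mlock_state(struct vm_area_struct *vma)
{
	/* old form: vma->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT); */
	vm_flags_clear(vma, VM_LOCKED_MASK);
}

Funnelling every vm_flags write through one helper gives the mm layer a single place to attach locking assertions to flag updates.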
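
The CONFIG_SCHED_MM_CID additions (tsk->mm_cid, mm_init_cid(), sched_mm_cid_fork(), mm_cid_size()) wire a per-memory-map concurrency ID into fork: each running thread of an mm gets a compact ID, roughly within [0, min(nr_cpus, nr_threads)), that userspace can use to index per-thread data. As a conceptual illustration only, not the kernel's implementation (which sizes a per-mm cpumask by nr_cpu_ids and manages it from the scheduler), the allocation idea is lowest-free-bit in a bitmap:

/* Conceptual illustration, not kernel code: claim the lowest free ID on
 * switch-in, release it on switch-out, so IDs stay dense. Locking omitted. */
#include <stdint.h>

#define MAX_CIDS 64			/* the kernel sizes this by nr_cpu_ids */

static uint64_t cid_bitmap;		/* bit i set means cid i is in use */

static int cid_get(void)
{
	for (int i = 0; i < MAX_CIDS; i++) {
		if (!(cid_bitmap & (UINT64_C(1) << i))) {
			cid_bitmap |= UINT64_C(1) << i;
			return i;	/* dense: always the lowest free ID */
		}
	}
	return -1;			/* cannot happen while MAX_CIDS >= nr_cpus */
}

static void cid_put(int cid)
{
	cid_bitmap &= ~(UINT64_C(1) << cid);
}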
