author     Jason Gunthorpe <jgg@nvidia.com>       2023-04-04 11:03:24 -0300
committer  Jason Gunthorpe <jgg@nvidia.com>       2023-04-04 11:04:30 -0300
commit     692d42d411b7db6a76382537fccbee3a12a2bcdb (patch)
tree       24770529cf173188bc5d2d0d9331c0ac723b631e /kernel/sched/core.c
parent     c52159b5be7894540acdc7a35791c0b57097fa4c (diff)
parent     13a0d1ae7ee6b438f5537711a8c60cba00554943 (diff)
Merge branch 'iommufd/for-rc' into for-next
The following selftest patch requires both the bug fixes and the
improvements to the selftest framework.
* iommufd/for-rc:
iommufd: Do not corrupt the pfn list when doing batch carry
iommufd: Fix unpinning of pages when an access is present
iommufd: Check for uptr overflow
Linux 6.3-rc5
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--   kernel/sched/core.c   7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index af017e038b48..0d18c3969f90 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2084,6 +2084,9 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags
 
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
 {
+	if (task_on_rq_migrating(p))
+		flags |= ENQUEUE_MIGRATED;
+
 	enqueue_task(rq, p, flags);
 
 	p->on_rq = TASK_ON_RQ_QUEUED;
@@ -8414,14 +8417,14 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
 	if (len & (sizeof(unsigned long)-1))
 		return -EINVAL;
 
-	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
 		return -ENOMEM;
 
 	ret = sched_getaffinity(pid, mask);
 	if (ret == 0) {
 		unsigned int retlen = min(len, cpumask_size());
 
-		if (copy_to_user(user_mask_ptr, mask, retlen))
+		if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen))
 			ret = -EFAULT;
 		else
 			ret = retlen;
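Two unrelated sched fixes ride in via the Linux 6.3-rc5 base merge. The activate_task() hunk sets ENQUEUE_MIGRATED when the task is in the middle of a migration, so the scheduling class's enqueue path can tell the task arrived from another runqueue and treat it accordingly. The sched_getaffinity() hunk matters because cpumask operations generally only touch the first nr_cpu_ids bits while the syscall copies min(len, cpumask_size()) bytes back to userspace; zalloc_cpumask_var() guarantees the untouched tail of that buffer is zero rather than stale kernel heap data, and cpumask_bits() makes the copy explicitly operate on the underlying bitmap array. A minimal userspace sketch (illustrative only, not part of this commit; the 1024-bit buffer size is an assumption) that exercises the raw syscall and shows the bytes the kernel hands back:

/*
 * Call the raw sched_getaffinity syscall with an oversized, 0xff-filled
 * buffer.  On success the raw syscall returns the number of bytes the
 * kernel copied, i.e. min(len, cpumask_size()); with the zalloc fix any
 * bytes beyond the bits actually populated for nr_cpu_ids come back as
 * zero rather than as uninitialized kernel memory.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	unsigned long mask[16];	/* 1024 bits, larger than nr_cpu_ids on most systems */
	long copied;
	long i;

	memset(mask, 0xff, sizeof(mask));	/* poison so a zeroed tail is visible */

	copied = syscall(SYS_sched_getaffinity, 0 /* current task */, sizeof(mask), mask);
	if (copied < 0) {
		perror("sched_getaffinity");
		return 1;
	}

	printf("kernel copied %ld bytes\n", copied);
	for (i = 0; i < copied / (long)sizeof(unsigned long); i++)
		printf("word %2ld: %#018lx\n", i, mask[i]);

	return 0;
}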