From b9705d8778e7adc97de38f405f835a2426e14d84 Mon Sep 17 00:00:00 2001
From: Juergen Gross
Date: Thu, 4 Jul 2019 15:14:36 -0700
Subject: mm/page_alloc.c: fix regression with deferred struct page init

Commit 0e56acae4b4d ("mm: initialize MAX_ORDER_NR_PAGES at a time instead
of doing larger sections") is causing a regression on some systems when
the kernel is booted as Xen dom0.  The system will just hang in early
boot.

The reason is an endless loop in get_page_from_freelist() in case the
first zone looked at has no free memory.  deferred_grow_zone() is always
returning true due to the following code snippet:

	/* If the zone is empty somebody else may have cleared out the zone */
	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
						 first_deferred_pfn)) {
		pgdat->first_deferred_pfn = ULONG_MAX;
		pgdat_resize_unlock(pgdat, &flags);
		return true;
	}

This in turn results in the loop, as get_page_from_freelist() assumes
forward progress can be made by doing some more struct page
initialization.

Link: http://lkml.kernel.org/r/20190620160821.4210-1-jgross@suse.com
Fixes: 0e56acae4b4d ("mm: initialize MAX_ORDER_NR_PAGES at a time instead of doing larger sections")
Signed-off-by: Juergen Gross
Suggested-by: Alexander Duyck
Acked-by: Alexander Duyck
Cc: Michal Hocko
Cc: Pavel Tatashin
Cc: Mike Rapoport
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/page_alloc.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d66bc8abe0af..8e3bc949ebcc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1826,7 +1826,8 @@ deferred_grow_zone(struct zone *zone, unsigned int order)
 						 first_deferred_pfn)) {
 		pgdat->first_deferred_pfn = ULONG_MAX;
 		pgdat_resize_unlock(pgdat, &flags);
-		return true;
+		/* Retry only once. */
+		return first_deferred_pfn != ULONG_MAX;
 	}
 
 	/*
-- cgit

From cbcfa130a911c613a1d9d921af2eea171c414172 Mon Sep 17 00:00:00 2001
From: Eric Biggers
Date: Thu, 4 Jul 2019 15:14:39 -0700
Subject: fs/userfaultfd.c: disable irqs for fault_pending and event locks

When IOCB_CMD_POLL is used on a userfaultfd, aio_poll() disables IRQs
and takes kioctx::ctx_lock, then userfaultfd_ctx::fd_wqh.lock.

This may have to wait for userfaultfd_ctx::fd_wqh.lock to be released
by userfaultfd_ctx_read(), which in turn can be waiting for
userfaultfd_ctx::fault_pending_wqh.lock or
userfaultfd_ctx::event_wqh.lock.  But elsewhere the fault_pending_wqh
and event_wqh locks are taken with IRQs enabled.  Since the IRQ handler
may take kioctx::ctx_lock, lockdep reports that a deadlock is possible.

Fix it by always disabling IRQs when taking the fault_pending_wqh and
event_wqh locks.

Commit ae62c16e105a ("userfaultfd: disable irqs when taking the
waitqueue lock") didn't fix this because it only accounted for the
fd_wqh lock, not the other locks nested inside it.
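As an illustration of the rule this enforces (a simplified sketch that
mirrors the change below rather than quoting it; the helper name is
hypothetical), any wake path that nests under fd_wqh.lock now takes the
inner waitqueue lock with IRQs disabled:

	/*
	 * Sketch: fault_pending_wqh.lock nests inside fd_wqh.lock, and
	 * aio_poll() takes fd_wqh.lock while holding an IRQ-safe lock,
	 * so the inner lock must be taken with IRQs disabled as well.
	 */
	static void uffd_wake_pending_sketch(struct userfaultfd_ctx *ctx,
					     struct userfaultfd_wake_range *range)
	{
		spin_lock_irq(&ctx->fault_pending_wqh.lock);
		__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, range);
		spin_unlock_irq(&ctx->fault_pending_wqh.lock);
	}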
Link: http://lkml.kernel.org/r/20190627075004.21259-1-ebiggers@kernel.org
Fixes: bfe4037e722e ("aio: implement IOCB_CMD_POLL")
Signed-off-by: Eric Biggers
Reported-by: syzbot+fab6de82892b6b9c6191@syzkaller.appspotmail.com
Reported-by: syzbot+53c0b767f7ca0dc0c451@syzkaller.appspotmail.com
Reported-by: syzbot+a3accb352f9c22041cfa@syzkaller.appspotmail.com
Reviewed-by: Andrew Morton
Cc: Christoph Hellwig
Cc: Andrea Arcangeli
Cc: [4.19+]
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 fs/userfaultfd.c | 42 ++++++++++++++++++++++++++----------------
 1 file changed, 26 insertions(+), 16 deletions(-)

diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index ae0b8b5f69e6..ccbdbd62f0d8 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -40,6 +40,16 @@ enum userfaultfd_state {
 /*
  * Start with fault_pending_wqh and fault_wqh so they're more likely
  * to be in the same cacheline.
+ *
+ * Locking order:
+ *	fd_wqh.lock
+ *		fault_pending_wqh.lock
+ *			fault_wqh.lock
+ *		event_wqh.lock
+ *
+ * To avoid deadlocks, IRQs must be disabled when taking any of the above locks,
+ * since fd_wqh.lock is taken by aio_poll() while it's holding a lock that's
+ * also taken in IRQ context.
  */
 struct userfaultfd_ctx {
 	/* waitqueue head for the pending (i.e. not read) userfaults */
@@ -458,7 +468,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
 	blocking_state = return_to_userland ? TASK_INTERRUPTIBLE :
 			 TASK_KILLABLE;
 
-	spin_lock(&ctx->fault_pending_wqh.lock);
+	spin_lock_irq(&ctx->fault_pending_wqh.lock);
 	/*
 	 * After the __add_wait_queue the uwq is visible to userland
 	 * through poll/read().
@@ -470,7 +480,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
 	 * __add_wait_queue.
 	 */
 	set_current_state(blocking_state);
-	spin_unlock(&ctx->fault_pending_wqh.lock);
+	spin_unlock_irq(&ctx->fault_pending_wqh.lock);
 
 	if (!is_vm_hugetlb_page(vmf->vma))
 		must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
@@ -552,13 +562,13 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
 	 * kernel stack can be released after the list_del_init.
 	 */
 	if (!list_empty_careful(&uwq.wq.entry)) {
-		spin_lock(&ctx->fault_pending_wqh.lock);
+		spin_lock_irq(&ctx->fault_pending_wqh.lock);
 		/*
 		 * No need of list_del_init(), the uwq on the stack
 		 * will be freed shortly anyway.
 		 */
 		list_del(&uwq.wq.entry);
-		spin_unlock(&ctx->fault_pending_wqh.lock);
+		spin_unlock_irq(&ctx->fault_pending_wqh.lock);
 	}
 
 	/*
@@ -583,7 +593,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
 	init_waitqueue_entry(&ewq->wq, current);
 	release_new_ctx = NULL;
 
-	spin_lock(&ctx->event_wqh.lock);
+	spin_lock_irq(&ctx->event_wqh.lock);
 	/*
 	 * After the __add_wait_queue the uwq is visible to userland
 	 * through poll/read().
@@ -613,15 +623,15 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
 			break;
 		}
 
-		spin_unlock(&ctx->event_wqh.lock);
+		spin_unlock_irq(&ctx->event_wqh.lock);
 
 		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
 		schedule();
 
-		spin_lock(&ctx->event_wqh.lock);
+		spin_lock_irq(&ctx->event_wqh.lock);
 	}
 	__set_current_state(TASK_RUNNING);
-	spin_unlock(&ctx->event_wqh.lock);
+	spin_unlock_irq(&ctx->event_wqh.lock);
 
 	if (release_new_ctx) {
 		struct vm_area_struct *vma;
@@ -918,10 +928,10 @@ wakeup:
 	 * the last page faults that may have been already waiting on
 	 * the fault_*wqh.
 	 */
-	spin_lock(&ctx->fault_pending_wqh.lock);
+	spin_lock_irq(&ctx->fault_pending_wqh.lock);
 	__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
 	__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
-	spin_unlock(&ctx->fault_pending_wqh.lock);
+	spin_unlock_irq(&ctx->fault_pending_wqh.lock);
 
 	/* Flush pending events that may still wait on event_wqh */
 	wake_up_all(&ctx->event_wqh);
@@ -1134,7 +1144,7 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
 
 	if (!ret && msg->event == UFFD_EVENT_FORK) {
 		ret = resolve_userfault_fork(ctx, fork_nctx, msg);
-		spin_lock(&ctx->event_wqh.lock);
+		spin_lock_irq(&ctx->event_wqh.lock);
 		if (!list_empty(&fork_event)) {
 			/*
 			 * The fork thread didn't abort, so we can
@@ -1180,7 +1190,7 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
 			if (ret)
 				userfaultfd_ctx_put(fork_nctx);
 		}
-		spin_unlock(&ctx->event_wqh.lock);
+		spin_unlock_irq(&ctx->event_wqh.lock);
 	}
 
 	return ret;
@@ -1219,14 +1229,14 @@ static ssize_t userfaultfd_read(struct file *file, char __user *buf,
 static void __wake_userfault(struct userfaultfd_ctx *ctx,
 			     struct userfaultfd_wake_range *range)
 {
-	spin_lock(&ctx->fault_pending_wqh.lock);
+	spin_lock_irq(&ctx->fault_pending_wqh.lock);
 	/* wake all in the range and autoremove */
 	if (waitqueue_active(&ctx->fault_pending_wqh))
 		__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
 				     range);
 	if (waitqueue_active(&ctx->fault_wqh))
 		__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range);
-	spin_unlock(&ctx->fault_pending_wqh.lock);
+	spin_unlock_irq(&ctx->fault_pending_wqh.lock);
 }
 
 static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
@@ -1881,7 +1891,7 @@ static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
 	wait_queue_entry_t *wq;
 	unsigned long pending = 0, total = 0;
 
-	spin_lock(&ctx->fault_pending_wqh.lock);
+	spin_lock_irq(&ctx->fault_pending_wqh.lock);
 	list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) {
 		pending++;
 		total++;
@@ -1889,7 +1899,7 @@ static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
 	list_for_each_entry(wq, &ctx->fault_wqh.head, entry) {
 		total++;
 	}
-	spin_unlock(&ctx->fault_pending_wqh.lock);
+	spin_unlock_irq(&ctx->fault_pending_wqh.lock);
 
 	/*
 	 * If more protocols will be added, there will be all shown
-- cgit

From dffcac2cb88e4ec5906235d64a83d802580b119e Mon Sep 17 00:00:00 2001
From: Shakeel Butt
Date: Thu, 4 Jul 2019 15:14:42 -0700
Subject: mm/vmscan.c: prevent useless kswapd loops

In production we have noticed hard lockups on large machines running
large jobs, due to kswapd hoarding the LRU lock within
isolate_lru_pages() when sc->reclaim_idx is 0, which is a small zone.
The LRU was a couple of hundred GiBs, and the condition
(page_zonenum(page) > sc->reclaim_idx) in isolate_lru_pages() was
basically skipping GiBs of pages while holding the LRU spinlock with
interrupts disabled.

On further inspection, it seems like there are two issues:

(1) If kswapd, on return from balance_pgdat(), could not sleep (i.e.
    the node is still unbalanced), classzone_idx is unintentionally set
    to 0 and the whole reclaim cycle of kswapd will try to reclaim only
    the lowest and smallest zone while traversing the whole memory.

(2) Fundamentally, isolate_lru_pages() is really bad when the
    allocation has woken kswapd for a smaller zone on a very large
    machine running very large jobs.  It can hoard the LRU spinlock
    while skipping over 100s of GiBs of pages.

This patch only fixes (1).  (2) needs a more fundamental solution.
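Issue (1) can be seen in the pre-patch kswapd() main loop; a simplified
sketch follows (the comment is added here for illustration and is not
in the source):

	/* kswapd() main loop, before this patch (simplified): */
	alloc_order = reclaim_order = pgdat->kswapd_order;
	classzone_idx = kswapd_classzone_idx(pgdat, 0);
	pgdat->kswapd_order = 0;
	pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
	/*
	 * If no allocation has woken kswapd since the reset above,
	 * kswapd_classzone_idx() falls back to the passed-in 0, so an
	 * unbalanced node keeps being reclaimed for the lowest zone only.
	 */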
To fix (1), in the kswapd context, if pgdat->kswapd_classzone_idx is
invalid, use the classzone_idx of the previous kswapd loop; otherwise,
use the one the waker has requested.

Link: http://lkml.kernel.org/r/20190701201847.251028-1-shakeelb@google.com
Fixes: e716f2eb24de ("mm, vmscan: prevent kswapd sleeping prematurely due to mismatched classzone_idx")
Signed-off-by: Shakeel Butt
Reviewed-by: Yang Shi
Acked-by: Mel Gorman
Cc: Johannes Weiner
Cc: Michal Hocko
Cc: Vlastimil Babka
Cc: Hillf Danton
Cc: Roman Gushchin
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/vmscan.c | 27 +++++++++++++++------------
 1 file changed, 15 insertions(+), 12 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7889f583ced9..910e02c793ff 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3644,19 +3644,18 @@ out:
 }
 
 /*
- * pgdat->kswapd_classzone_idx is the highest zone index that a recent
- * allocation request woke kswapd for. When kswapd has not woken recently,
- * the value is MAX_NR_ZONES which is not a valid index. This compares a
- * given classzone and returns it or the highest classzone index kswapd
- * was recently woke for.
+ * The pgdat->kswapd_classzone_idx is used to pass the highest zone index to be
+ * reclaimed by kswapd from the waker. If the value is MAX_NR_ZONES which is not
+ * a valid index then either kswapd runs for first time or kswapd couldn't sleep
+ * after previous reclaim attempt (node is still unbalanced). In that case
+ * return the zone index of the previous kswapd reclaim cycle.
  */
 static enum zone_type kswapd_classzone_idx(pg_data_t *pgdat,
-					   enum zone_type classzone_idx)
+					   enum zone_type prev_classzone_idx)
 {
 	if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
-		return classzone_idx;
-
-	return max(pgdat->kswapd_classzone_idx, classzone_idx);
+		return prev_classzone_idx;
+	return pgdat->kswapd_classzone_idx;
 }
 
 static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
@@ -3797,7 +3796,7 @@ kswapd_try_sleep:
 
 		/* Read the new order and classzone_idx */
 		alloc_order = reclaim_order = pgdat->kswapd_order;
-		classzone_idx = kswapd_classzone_idx(pgdat, 0);
+		classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
 		pgdat->kswapd_order = 0;
 		pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
 
@@ -3851,8 +3850,12 @@ void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
 	if (!cpuset_zone_allowed(zone, gfp_flags))
 		return;
 	pgdat = zone->zone_pgdat;
-	pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat,
-							   classzone_idx);
+
+	if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
+		pgdat->kswapd_classzone_idx = classzone_idx;
+	else
+		pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx,
+						  classzone_idx);
 	pgdat->kswapd_order = max(pgdat->kswapd_order, order);
 	if (!waitqueue_active(&pgdat->kswapd_wait))
 		return;
-- cgit

From eef778c99c0239ed0a0696ddf22ae3673f28a489 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Thu, 4 Jul 2019 15:14:45 -0700
Subject: devres: allow const resource arguments

devm_ioremap_resource() does not currently take 'const' arguments, which
results in a warning from the first driver trying to do it anyway:

  drivers/gpio/gpio-amd-fch.c: In function 'amd_fch_gpio_probe':
  drivers/gpio/gpio-amd-fch.c:171:49: error: passing argument 2 of 'devm_ioremap_resource' discards 'const' qualifier from pointer target type [-Werror=discarded-qualifiers]
     priv->base = devm_ioremap_resource(&pdev->dev, &amd_fch_gpio_iores);
                                                    ^~~~~~~~~~~~~~~~~~~

Change the prototype to allow it, as there is no real reason not to.
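With the const-qualified prototype, a driver can map a statically
defined, read-only resource directly.  A minimal sketch (the driver
name, address and size below are hypothetical placeholders, not taken
from gpio-amd-fch):

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>

	/* Hypothetical fixed MMIO window described as a const resource. */
	static const struct resource example_iores =
		DEFINE_RES_MEM_NAMED(0xfed80000, 0x100, "example-iomem");

	static int example_probe(struct platform_device *pdev)
	{
		void __iomem *base;

		/* Passing a pointer to a const resource now compiles cleanly. */
		base = devm_ioremap_resource(&pdev->dev, &example_iores);
		if (IS_ERR(base))
			return PTR_ERR(base);

		return 0;
	}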
Link: http://lkml.kernel.org/r/20190628150049.1108048-1-arnd@arndb.de
Fixes: 9bb2e0452508 ("gpio: amd: Make resource struct const")
Signed-off-by: Arnd Bergmann
Acked-by: Greg Kroah-Hartman
Reviewed-by: Enrico Weigelt
Reviewed-by: Andrew Morton
Cc: Greg Kroah-Hartman
Cc: Linus Walleij
Cc: Arnd Bergmann
Cc: "Rafael J. Wysocki"
Cc: Ulf Hansson
Cc: Andy Shevchenko
Cc: Heikki Krogerus
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/device.h | 3 ++-
 lib/devres.c           | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/include/linux/device.h b/include/linux/device.h
index 848fc71c6ba6..4a295e324ac5 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -704,7 +704,8 @@ extern unsigned long devm_get_free_pages(struct device *dev,
 					 gfp_t gfp_mask, unsigned int order);
 extern void devm_free_pages(struct device *dev, unsigned long addr);
 
-void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
+void __iomem *devm_ioremap_resource(struct device *dev,
+				    const struct resource *res);
 
 void __iomem *devm_of_iomap(struct device *dev,
 			    struct device_node *node, int index,
diff --git a/lib/devres.c b/lib/devres.c
index 69bed2f38306..6a0e9bd6524a 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -131,7 +131,8 @@ EXPORT_SYMBOL(devm_iounmap);
  *	if (IS_ERR(base))
  *		return PTR_ERR(base);
  */
-void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
+void __iomem *devm_ioremap_resource(struct device *dev,
+				    const struct resource *res)
 {
 	resource_size_t size;
 	void __iomem *dest_ptr;
-- cgit

From 8751853091998cd31e9e5f1e8206280155af8921 Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Thu, 4 Jul 2019 15:14:49 -0700
Subject: swap_readpage(): avoid blk_wake_io_task() if !synchronous

swap_readpage() sets waiter = bio->bi_private even if synchronous is
false, which means that the caller can get a spurious wakeup after
return.

This can be fatal if blk_wake_io_task() does
set_current_state(TASK_RUNNING) after the caller does
set_special_state(); in the worst case the kernel can crash in
do_task_dead().

Link: http://lkml.kernel.org/r/20190704160301.GA5956@redhat.com
Fixes: 0619317ff8baa2d ("block: add polled wakeup task helper")
Signed-off-by: Oleg Nesterov
Reported-by: Qian Cai
Acked-by: Hugh Dickins
Reviewed-by: Jens Axboe
Cc:
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/page_io.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/mm/page_io.c b/mm/page_io.c
index 189415852077..a39aac2f8c8d 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -137,8 +137,10 @@ out:
 	unlock_page(page);
 	WRITE_ONCE(bio->bi_private, NULL);
 	bio_put(bio);
-	blk_wake_io_task(waiter);
-	put_task_struct(waiter);
+	if (waiter) {
+		blk_wake_io_task(waiter);
+		put_task_struct(waiter);
+	}
 }
 
 int generic_swapfile_activate(struct swap_info_struct *sis,
@@ -395,11 +397,12 @@ int swap_readpage(struct page *page, bool synchronous)
 	 * Keep this task valid during swap readpage because the oom killer may
 	 * attempt to access it in the page fault retry time check.
 	 */
-	get_task_struct(current);
-	bio->bi_private = current;
 	bio_set_op_attrs(bio, REQ_OP_READ, 0);
-	if (synchronous)
+	if (synchronous) {
 		bio->bi_opf |= REQ_HIPRI;
+		get_task_struct(current);
+		bio->bi_private = current;
+	}
 	count_vm_event(PSWPIN);
 	bio_get(bio);
 	qc = submit_bio(bio);
-- cgit
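For context on why only the synchronous case needs the bi_private
handshake: the synchronous caller polls for completion roughly as
follows (paraphrased from swap_readpage(); 'q' stands for the backing
device's request queue and is not spelled out here):

	/* Synchronous wait side of swap_readpage(), paraphrased: */
	qc = submit_bio(bio);
	while (synchronous) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio->bi_private))
			break;
		if (!blk_poll(q, qc, true))
			io_schedule();
	}
	__set_current_state(TASK_RUNNING);

With bi_private left NULL for asynchronous reads, end_swap_bio_read()
no longer wakes, or drops a reference on, a task that never armed this
handshake.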