author     Dan Williams <dan.j.williams@intel.com>          2019-06-13 15:56:33 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2019-06-13 17:34:56 -1000
commit     50f44ee7248ad2f7984ef081974a6ecd09724b3e (patch)
tree       ab753d9044434c94c8e6d70ead68173c2af339f6 /mm
parent     1570175abd164b32ea1cc677f9dfd2dc6bd093f5 (diff)
mm/devm_memremap_pages: fix final page put race
Logan noticed that devm_memremap_pages_release() kills the percpu_ref, drops all the page references that were acquired at init, and then immediately proceeds to unplug, via arch_remove_memory(), the backing pages for the pagemap. If device shutdown collides with a busy / elevated-ref-count page, then arch_remove_memory() should be deferred until after that reference is dropped. As it stands, the "wait for last page ref drop" happens *after* devm_memremap_pages_release() returns, which is obviously too late and can lead to crashes.

Fix this by making devm_memremap_pages() itself responsible for waiting for the percpu_ref to go idle, via a new ->cleanup() callback. Implement the new cleanup callback for all devm_memremap_pages() users: pmem, devdax, hmm, and p2pdma.

Link: http://lkml.kernel.org/r/155727339156.292046.5432007428235387859.stgit@dwillia2-desk3.amr.corp.intel.com
Fixes: 41e94a851304 ("add devm_memremap_pages")
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reported-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "Rafael J. Wysocki" <rafael@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
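To make the new contract concrete, here is a minimal sketch of how a devm_memremap_pages() user wires up the pair of callbacks. The my_* names are hypothetical; the pattern mirrors the hmm_devmem_ref_{kill,exit} changes in the diff below: the percpu_ref release handler signals a completion, ->kill drops the initial reference, and ->cleanup waits for the last page reference before exiting the ref.

/*
 * Minimal sketch of the ->kill/->cleanup contract introduced by this
 * commit.  The my_* names are hypothetical; the pattern mirrors the
 * hmm changes below.
 */
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/memremap.h>
#include <linux/percpu-refcount.h>

struct my_pgmap {
        struct dev_pagemap pagemap;
        struct percpu_ref ref;
        struct completion done;
};

/* percpu_ref release: runs when the last page reference is dropped */
static void my_ref_release(struct percpu_ref *ref)
{
        struct my_pgmap *p = container_of(ref, struct my_pgmap, ref);

        complete(&p->done);
}

/* ->kill: drop the reference taken at percpu_ref_init() time */
static void my_ref_kill(struct percpu_ref *ref)
{
        percpu_ref_kill(ref);
}

/* ->cleanup: block until the ref is idle, then tear it down */
static void my_ref_cleanup(struct percpu_ref *ref)
{
        struct my_pgmap *p = container_of(ref, struct my_pgmap, ref);

        wait_for_completion(&p->done);
        percpu_ref_exit(ref);
}

static int my_setup(struct device *dev, struct my_pgmap *p)
{
        int ret;

        init_completion(&p->done);
        ret = percpu_ref_init(&p->ref, my_ref_release, 0, GFP_KERNEL);
        if (ret)
                return ret;

        p->pagemap.ref = &p->ref;
        p->pagemap.kill = my_ref_kill;
        p->pagemap.cleanup = my_ref_cleanup;
        /* p->pagemap.res / altmap setup omitted */

        return PTR_ERR_OR_ZERO(devm_memremap_pages(dev, &p->pagemap));
}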
Diffstat (limited to 'mm')
-rw-r--r--    mm/hmm.c    14
1 file changed, 3 insertions(+), 11 deletions(-)
diff --git a/mm/hmm.c b/mm/hmm.c
index c5d840e34b28..f702a3895d05 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -1354,9 +1354,8 @@ static void hmm_devmem_ref_release(struct percpu_ref *ref)
complete(&devmem->completion);
}
-static void hmm_devmem_ref_exit(void *data)
+static void hmm_devmem_ref_exit(struct percpu_ref *ref)
{
- struct percpu_ref *ref = data;
struct hmm_devmem *devmem;
devmem = container_of(ref, struct hmm_devmem, ref);
@@ -1433,10 +1432,6 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
if (ret)
return ERR_PTR(ret);
- ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
- if (ret)
- return ERR_PTR(ret);
-
size = ALIGN(size, PA_SECTION_SIZE);
addr = min((unsigned long)iomem_resource.end,
(1UL << MAX_PHYSMEM_BITS) - 1);
@@ -1475,6 +1470,7 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
devmem->pagemap.ref = &devmem->ref;
devmem->pagemap.data = devmem;
devmem->pagemap.kill = hmm_devmem_ref_kill;
+ devmem->pagemap.cleanup = hmm_devmem_ref_exit;
result = devm_memremap_pages(devmem->device, &devmem->pagemap);
if (IS_ERR(result))
@@ -1512,11 +1508,6 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
if (ret)
return ERR_PTR(ret);
- ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
- &devmem->ref);
- if (ret)
- return ERR_PTR(ret);
-
devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
devmem->pfn_last = devmem->pfn_first +
(resource_size(devmem->resource) >> PAGE_SHIFT);
@@ -1529,6 +1520,7 @@ struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
devmem->pagemap.ref = &devmem->ref;
devmem->pagemap.data = devmem;
devmem->pagemap.kill = hmm_devmem_ref_kill;
+ devmem->pagemap.cleanup = hmm_devmem_ref_exit;
result = devm_memremap_pages(devmem->device, &devmem->pagemap);
if (IS_ERR(result))
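For context, here is the teardown ordering the new callback enables in devm_memremap_pages_release(). This is a paraphrased sketch of the idea, not the verbatim kernel/memremap.c source; locals and the actual unmap/untrack steps are elided.

/* Paraphrased sketch of the ordering devm_memremap_pages_release()
 * follows once ->cleanup() exists.  Not the verbatim kernel source. */
static void devm_memremap_pages_release(void *data)
{
        struct dev_pagemap *pgmap = data;
        unsigned long pfn;

        pgmap->kill(pgmap->ref);                /* stop new page references */
        for_each_device_pfn(pfn, pgmap)
                put_page(pfn_to_page(pfn));     /* drop the init-time page refs */
        pgmap->cleanup(pgmap->ref);             /* wait until the ref is idle */

        /* Only now is it safe to unplug the backing pages:
         * arch_remove_memory(), untrack_pfn(), resource teardown, ... */
}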