author	Linus Torvalds <torvalds@linux-foundation.org>	2019-08-11 13:15:10 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-08-11 13:15:10 -0700
commit	b6c0649caf351d39e1dfb5698d7b3bb7536850b1 (patch)
tree	27c23ef96945d54c9aaf1bea28f4f5dc7bfd0120
parent	f6192cb7429211bfaac1178c35607b0c989900b8 (diff)
parent	06282373ff57a2b82621be4f84f981e1b0a4eb28 (diff)
Merge tag 'dax-fixes-5.3-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm
Pull dax fixes from Dan Williams:
 "A filesystem-dax and device-dax fix for v5.3. The filesystem-dax fix
  is tagged for stable as the implementation has been mistakenly
  throwing away all cow pages on any truncate or hole punch operation
  as part of the solution to coordinate device-dma vs truncate to dax
  pages.

  The device-dax change fixes up a regression this cycle from the
  introduction of a common 'internal per-cpu-ref' implementation.

  Summary:

   - Fix dax_layout_busy_page() to not discard private cow pages of
     fs/dax private mappings.

   - Update the memremap_pages core to properly cleanup on behalf of
     internal reference-count users like device-dax"

* tag 'dax-fixes-5.3-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  mm/memremap: Fix reuse of pgmap instances with internal references
  dax: dax_layout_busy_page() should not unmap cow pages
-rw-r--r--	fs/dax.c	2
-rw-r--r--	mm/memremap.c	6
2 files changed, 7 insertions, 1 deletion
diff --git a/fs/dax.c b/fs/dax.c
index b64964ef44f6..6bf81f931de3 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -600,7 +600,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
 	 * guaranteed to either see new references or prevent new
 	 * references from being established.
 	 */
-	unmap_mapping_range(mapping, 0, 0, 1);
+	unmap_mapping_range(mapping, 0, 0, 0);
 
 	xas_lock_irq(&xas);
 	xas_for_each(&xas, entry, ULONG_MAX) {
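The one-character change above flips the final even_cows argument of unmap_mapping_range() from 1 to 0, so dax_layout_busy_page() only tears down shared, file-backed mappings. Private copy-on-write pages are backed by ordinary anonymous memory rather than the file's DAX blocks, so they do not need to be flushed out before a truncate or hole punch, and unmapping them (even_cows == 1) needlessly threw away the private copies, as the pull message notes. A minimal userspace analogue of that distinction, not the kernel path itself (the file name and 4 KiB size are purely illustrative):

/* Writes through a MAP_PRIVATE mapping land in anonymous COW pages,
 * not in the file's blocks, so they do not hold the file's storage. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("demo.dat", O_RDWR | O_CREAT | O_TRUNC, 0600);
	if (fd < 0 || ftruncate(fd, 4096) < 0)
		return 1;

	char *priv = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE, fd, 0);
	if (priv == MAP_FAILED)
		return 1;

	/* Fault in a private copy-on-write page and modify it. */
	strcpy(priv, "private write");

	/* The file itself is untouched: the COW page is anonymous memory. */
	char buf[16] = { 0 };
	pread(fd, buf, sizeof(buf) - 1, 0);
	printf("file contents: \"%s\"\n", buf);	/* still empty (zeroes) */

	munmap(priv, 4096);
	close(fd);
	unlink("demo.dat");
	return 0;
}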
diff --git a/mm/memremap.c b/mm/memremap.c
index 6ee03a816d67..86432650f829 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -91,6 +91,12 @@ static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
 		wait_for_completion(&pgmap->done);
 		percpu_ref_exit(pgmap->ref);
 	}
+	/*
+	 * Undo the pgmap ref assignment for the internal case as the
+	 * caller may re-enable the same pgmap.
+	 */
+	if (pgmap->ref == &pgmap->internal_ref)
+		pgmap->ref = NULL;
 }
 
 static void devm_memremap_pages_release(void *data)
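The added reset addresses the device-dax regression called out in the pull message: memremap_pages() falls back to the embedded internal_ref when the caller supplies no percpu_ref, and leaving that pointer in pgmap->ref after cleanup made a later re-registration of the same pgmap look like the external-reference case. A small self-contained analogue of that reuse hazard; the struct and function names here are invented for illustration and are not kernel API:

/* A setup routine that falls back to an embedded "internal" refcount
 * when the caller supplies none, mirroring the internal percpu_ref in
 * memremap_pages().  Teardown must forget the internal pointer or a
 * second setup() of the same instance misclassifies it as external. */
#include <stdio.h>

struct pagemap_like {
	int *ref;		/* caller-supplied refcount, or ... */
	int internal_ref;	/* ... this embedded fallback */
};

static int setup(struct pagemap_like *p)
{
	if (!p->ref) {
		/* Internal case: use the embedded refcount. */
		p->internal_ref = 1;
		p->ref = &p->internal_ref;
		return 0;
	}
	/* External case: the caller must hand in a live reference. */
	return (*p->ref > 0) ? 0 : -1;
}

static void teardown(struct pagemap_like *p)
{
	*p->ref = 0;
	/* The fix: drop the stale internal pointer so the instance can
	 * be set up again; otherwise the next setup() takes the
	 * external branch with a dead reference and fails. */
	if (p->ref == &p->internal_ref)
		p->ref = NULL;
}

int main(void)
{
	struct pagemap_like p = { .ref = NULL };

	setup(&p);
	teardown(&p);

	/* Reuse of the same instance, as device-dax does across probes. */
	printf("second setup: %s\n", setup(&p) == 0 ? "ok" : "-EINVAL");
	return 0;
}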