author     Dan Williams <dan.j.williams@intel.com>    2022-10-01 19:16:16 -0700
committer  Dan Williams <dan.j.williams@intel.com>    2022-10-01 19:16:16 -0700
commit     305a72efa791c826fe84768ca55e31adc4113ea8 (patch)
tree       35c72646ea4b81528d73d3d79650004abed3285b /drivers/nvdimm
parent     53fc59511fc4c567342b2ef3f7b99a086430e0b4 (diff)
parent     6a02124c87f0b61dcaaeb65e7fd406d8afb40fd4 (diff)
Merge branch 'for-6.1/nvdimm' into libnvdimm-for-next
Add v6.1 content on top of some straggling updates that missed v6.0.
Diffstat (limited to 'drivers/nvdimm')
-rw-r--r--  drivers/nvdimm/btt.c             8
-rw-r--r--  drivers/nvdimm/namespace_devs.c  2
-rw-r--r--  drivers/nvdimm/pmem.c           23
-rw-r--r--  drivers/nvdimm/region_devs.c    38
-rw-r--r--  drivers/nvdimm/security.c        2
-rw-r--r--  drivers/nvdimm/virtio_pmem.c     9
6 files changed, 59 insertions, 23 deletions
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 9613e54c7a67..0297b7882e33 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1422,7 +1422,7 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
struct page *page, unsigned int len, unsigned int off,
- unsigned int op, sector_t sector)
+ enum req_op op, sector_t sector)
{
int ret;
@@ -1483,7 +1483,7 @@ static void btt_submit_bio(struct bio *bio)
}
static int btt_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, unsigned int op)
+ struct page *page, enum req_op op)
{
struct btt *btt = bdev->bd_disk->private_data;
int rc;
@@ -1548,14 +1548,14 @@ static int btt_blk_init(struct btt *btt)
return 0;
out_cleanup_disk:
- blk_cleanup_disk(btt->btt_disk);
+ put_disk(btt->btt_disk);
return rc;
}
static void btt_blk_cleanup(struct btt *btt)
{
del_gendisk(btt->btt_disk);
- blk_cleanup_disk(btt->btt_disk);
+ put_disk(btt->btt_disk);
}
/**
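The btt.c hunks above track the block-layer change that retired blk_cleanup_disk(): del_gendisk() now tears down the request queue, so both the error path in btt_blk_init() and btt_blk_cleanup() only need to drop the gendisk reference with put_disk(). A minimal sketch of the resulting allocate/register/teardown pairing, not taken from this patch; the example_* names are hypothetical:

#include <linux/blkdev.h>

struct example_dev {
	struct gendisk *disk;
};

static int example_disk_init(struct example_dev *edev)
{
	int rc;

	/* Allocates the gendisk together with its bio-based request queue. */
	edev->disk = blk_alloc_disk(NUMA_NO_NODE);
	if (!edev->disk)
		return -ENOMEM;

	/* ... set fops, disk_name, capacity, private_data ... */

	rc = add_disk(edev->disk);
	if (rc)
		goto out_put;
	return 0;

out_put:
	/* Not yet registered: dropping the reference is all that is needed. */
	put_disk(edev->disk);
	return rc;
}

static void example_disk_cleanup(struct example_dev *edev)
{
	del_gendisk(edev->disk);	/* unregisters and shuts down the queue */
	put_disk(edev->disk);		/* drops the final gendisk reference */
}
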
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 0f863fda56e6..90eefa0ee64d 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -385,7 +385,7 @@ static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
*
* BLK-space is valid as long as it does not precede a PMEM
* allocation in a given region. PMEM-space must be contiguous
- * and adjacent to an existing existing allocation (if one
+ * and adjacent to an existing allocation (if one
* exists). If reserving PMEM any space is valid.
*/
static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 629d10fcf53b..7e88cd242380 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -239,7 +239,7 @@ static void pmem_submit_bio(struct bio *bio)
}
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, unsigned int op)
+ struct page *page, enum req_op op)
{
struct pmem_device *pmem = bdev->bd_disk->private_data;
blk_status_t rc;
@@ -450,9 +450,24 @@ static void pmem_release_disk(void *__pmem)
put_dax(pmem->dax_dev);
del_gendisk(pmem->disk);
- blk_cleanup_disk(pmem->disk);
+ put_disk(pmem->disk);
}
+static int pmem_pagemap_memory_failure(struct dev_pagemap *pgmap,
+ unsigned long pfn, unsigned long nr_pages, int mf_flags)
+{
+ struct pmem_device *pmem =
+ container_of(pgmap, struct pmem_device, pgmap);
+ u64 offset = PFN_PHYS(pfn) - pmem->phys_addr - pmem->data_offset;
+ u64 len = nr_pages << PAGE_SHIFT;
+
+ return dax_holder_notify_failure(pmem->dax_dev, offset, len, mf_flags);
+}
+
+static const struct dev_pagemap_ops fsdax_pagemap_ops = {
+ .memory_failure = pmem_pagemap_memory_failure,
+};
+
static int pmem_attach_disk(struct device *dev,
struct nd_namespace_common *ndns)
{
@@ -514,6 +529,7 @@ static int pmem_attach_disk(struct device *dev,
pmem->pfn_flags = PFN_DEV;
if (is_nd_pfn(dev)) {
pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
+ pmem->pgmap.ops = &fsdax_pagemap_ops;
addr = devm_memremap_pages(dev, &pmem->pgmap);
pfn_sb = nd_pfn->pfn_sb;
pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
@@ -527,6 +543,7 @@ static int pmem_attach_disk(struct device *dev,
pmem->pgmap.range.end = res->end;
pmem->pgmap.nr_range = 1;
pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
+ pmem->pgmap.ops = &fsdax_pagemap_ops;
addr = devm_memremap_pages(dev, &pmem->pgmap);
pmem->pfn_flags |= PFN_MAP;
bb_range = pmem->pgmap.range;
@@ -596,7 +613,7 @@ out_cleanup_dax:
kill_dax(pmem->dax_dev);
put_dax(pmem->dax_dev);
out:
- blk_cleanup_disk(pmem->disk);
+ put_disk(pmem->disk);
return rc;
}
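The new fsdax_pagemap_ops above gives pmem a memory_failure() hook: when the core memory-failure path hits a poisoned pfn in this pagemap, the pfn is translated into a byte offset relative to the namespace data area and forwarded via dax_holder_notify_failure() to whoever holds the dax device, typically the mounted filesystem. A rough sketch of the consumer side, assuming the dax holder interface that landed alongside this work; the example_* names are hypothetical, and a real filesystem attaches its dax_holder_operations when it obtains the dax_device (e.g. via fs_dax_get_by_bdev()):

#include <linux/dax.h>
#include <linux/printk.h>

/* Hypothetical holder callback reached through dax_holder_notify_failure(). */
static int example_notify_failure(struct dax_device *dax_dev,
		u64 offset, u64 len, int mf_flags)
{
	/*
	 * offset/len describe the poisoned byte range relative to the dax
	 * device, matching the offset computed in
	 * pmem_pagemap_memory_failure(). A filesystem would reverse-map this
	 * range to files and notify or kill the affected processes.
	 */
	pr_warn("dax poison at offset %llu, len %llu (flags %#x)\n",
			offset, len, mf_flags);
	return 0;
}

static const struct dax_holder_operations example_holder_ops = {
	.notify_failure = example_notify_failure,
};
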
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index d976260eca7a..e0875d369762 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -133,7 +133,8 @@ static void nd_region_release(struct device *dev)
put_device(&nvdimm->dev);
}
free_percpu(nd_region->lane);
- memregion_free(nd_region->id);
+ if (!test_bit(ND_REGION_CXL, &nd_region->flags))
+ memregion_free(nd_region->id);
kfree(nd_region);
}
@@ -508,16 +509,13 @@ static ssize_t align_store(struct device *dev,
{
struct nd_region *nd_region = to_nd_region(dev);
unsigned long val, dpa;
- u32 remainder;
+ u32 mappings, remainder;
int rc;
rc = kstrtoul(buf, 0, &val);
if (rc)
return rc;
- if (!nd_region->ndr_mappings)
- return -ENXIO;
-
/*
* Ensure space-align is evenly divisible by the region
* interleave-width because the kernel typically has no facility
@@ -525,7 +523,8 @@ static ssize_t align_store(struct device *dev,
* contribute to the tail capacity in system-physical-address
* space for the namespace.
*/
- dpa = div_u64_rem(val, nd_region->ndr_mappings, &remainder);
+ mappings = max_t(u32, 1, nd_region->ndr_mappings);
+ dpa = div_u64_rem(val, mappings, &remainder);
if (!is_power_of_2(dpa) || dpa < PAGE_SIZE
|| val > region_size(nd_region) || remainder)
return -EINVAL;
@@ -982,9 +981,14 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
if (!nd_region)
return NULL;
- nd_region->id = memregion_alloc(GFP_KERNEL);
- if (nd_region->id < 0)
- goto err_id;
+ /* CXL pre-assigns memregion ids before creating nvdimm regions */
+ if (test_bit(ND_REGION_CXL, &ndr_desc->flags)) {
+ nd_region->id = ndr_desc->memregion;
+ } else {
+ nd_region->id = memregion_alloc(GFP_KERNEL);
+ if (nd_region->id < 0)
+ goto err_id;
+ }
nd_region->lane = alloc_percpu(struct nd_percpu_lane);
if (!nd_region->lane)
@@ -1043,9 +1047,10 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
return nd_region;
- err_percpu:
- memregion_free(nd_region->id);
- err_id:
+err_percpu:
+ if (!test_bit(ND_REGION_CXL, &ndr_desc->flags))
+ memregion_free(nd_region->id);
+err_id:
kfree(nd_region);
return NULL;
}
@@ -1068,6 +1073,13 @@ struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);
+void nvdimm_region_delete(struct nd_region *nd_region)
+{
+ if (nd_region)
+ nd_device_unregister(&nd_region->dev, ND_SYNC);
+}
+EXPORT_SYMBOL_GPL(nvdimm_region_delete);
+
int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
{
int rc = 0;
@@ -1082,7 +1094,7 @@ int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
return rc;
}
/**
- * nvdimm_flush - flush any posted write queues between the cpu and pmem media
+ * generic_nvdimm_flush() - flush any posted write queues between the cpu and pmem media
* @nd_region: interleaved pmem region
*/
int generic_nvdimm_flush(struct nd_region *nd_region)
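Taken together, the region_devs.c hunks make the memregion id caller-owned for CXL: when ND_REGION_CXL is set, nd_region_create() adopts the pre-assigned id from ndr_desc->memregion instead of calling memregion_alloc(), and both nd_region_release() and the error unwind skip memregion_free() so the caller can free it. A hedged sketch of the caller side this enables; the example_* helper is hypothetical (the real consumer is the CXL/nvdimm bridge):

#include <linux/memregion.h>
#include <linux/libnvdimm.h>

/* Hypothetical caller that owns the memregion id across the region's life. */
static struct nd_region *example_create_region(struct nvdimm_bus *bus,
		struct nd_region_desc *ndr_desc)
{
	struct nd_region *nd_region;
	int id;

	id = memregion_alloc(GFP_KERNEL);
	if (id < 0)
		return NULL;

	ndr_desc->memregion = id;
	/* Tell nd_region_create() not to allocate or free the id itself. */
	set_bit(ND_REGION_CXL, &ndr_desc->flags);

	nd_region = nvdimm_pmem_region_create(bus, ndr_desc);
	if (!nd_region)
		memregion_free(id);
	return nd_region;
}

/*
 * Teardown is the mirror image: the caller removes the region with the
 * newly exported nvdimm_region_delete() and then frees the id it allocated.
 */
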
diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
index b5aa55c61461..8aefb60c42ff 100644
--- a/drivers/nvdimm/security.c
+++ b/drivers/nvdimm/security.c
@@ -408,7 +408,7 @@ static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
return rc;
}
-void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
+static void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nvdimm->dev);
int rc;
diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c
index 995b6cdc67ed..20da455d2ef6 100644
--- a/drivers/nvdimm/virtio_pmem.c
+++ b/drivers/nvdimm/virtio_pmem.c
@@ -81,17 +81,24 @@ static int virtio_pmem_probe(struct virtio_device *vdev)
ndr_desc.res = &res;
ndr_desc.numa_node = nid;
ndr_desc.flush = async_pmem_flush;
+ ndr_desc.provider_data = vdev;
set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
set_bit(ND_REGION_ASYNC, &ndr_desc.flags);
+ /*
+ * The NVDIMM region could be available before the
+ * virtio_device_ready() that is called by
+ * virtio_dev_probe(), so we set device ready here.
+ */
+ virtio_device_ready(vdev);
nd_region = nvdimm_pmem_region_create(vpmem->nvdimm_bus, &ndr_desc);
if (!nd_region) {
dev_err(&vdev->dev, "failed to create nvdimm region\n");
err = -ENXIO;
goto out_nd;
}
- nd_region->provider_data = dev_to_virtio(nd_region->dev.parent->parent);
return 0;
out_nd:
+ virtio_reset_device(vdev);
nvdimm_bus_unregister(vpmem->nvdimm_bus);
out_vq:
vdev->config->del_vqs(vdev);
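The virtio_pmem.c change illustrates a general virtio probe rule: virtio_dev_probe() only marks the device DRIVER_OK after the driver's probe() returns, so if probe() publishes something that can start using the virtqueues immediately (here the nvdimm region, whose async_pmem_flush callback submits flush requests), the driver must call virtio_device_ready() itself before publishing and undo it with virtio_reset_device() on the error path. A minimal sketch of that ordering, with example_publish() standing in for whatever makes the device reachable:

#include <linux/virtio.h>
#include <linux/virtio_config.h>

static int example_publish(struct virtio_device *vdev);	/* hypothetical */

static int example_probe(struct virtio_device *vdev)
{
	int err;

	/* ... find virtqueues and allocate driver state here ... */

	/*
	 * The object registered below may submit buffers as soon as it is
	 * visible, so set DRIVER_OK now instead of waiting for
	 * virtio_dev_probe() to do it after probe() returns.
	 */
	virtio_device_ready(vdev);

	err = example_publish(vdev);
	if (err)
		goto out_reset;
	return 0;

out_reset:
	/* Quiesce the device before tearing down its virtqueues. */
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);
	return err;
}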