author:    Christoph Hellwig <hch@lst.de>          2017-12-29 08:54:05 +0100
committer: Dan Williams <dan.j.williams@intel.com> 2018-01-08 11:46:23 -0800
commit:    e8d5134833006a46fcbefc5f4a84d0b62bd520e7
tree:      dbd532f4ef91828e251a291c967ee3afe71fd475 /drivers/nvdimm
parent:    e7744aa25cffe26d3767c9ffcf4e130cca1dff00
memremap: change devm_memremap_pages interface to use struct dev_pagemap
This new interface is similar to how struct device (and many others) work. The
caller initializes a 'struct dev_pagemap' as required and calls
'devm_memremap_pages'. This allows the pagemap structure to be embedded in
another structure, so that container_of can be used. In this way, application
specific members can be stored in a containing struct.

This will be used by the P2P infrastructure, and HMM could probably be cleaned
up to use it as well (instead of having its own, similar
'hmm_devmem_pages_create' function).

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
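To illustrate the embedding pattern the message describes, here is a minimal
sketch of a hypothetical caller (the 'foo_device' structure and the
'foo_attach'/'to_foo' helpers are invented for illustration; the dev_pagemap
fields shown are the ones this patch touches, and the usage mirrors the pmem.c
changes below):

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/ioport.h>
    #include <linux/kernel.h>
    #include <linux/memremap.h>
    #include <linux/string.h>

    /* Hypothetical driver-private structure embedding the pagemap. */
    struct foo_device {
            struct dev_pagemap pgmap;       /* embedded, not a pointer */
            void *private_data;             /* application specific member */
    };

    static int foo_attach(struct device *dev, struct foo_device *foo,
                    struct resource *res)
    {
            void *addr;

            /* The caller initializes the dev_pagemap before the call. */
            memcpy(&foo->pgmap.res, res, sizeof(foo->pgmap.res));
            foo->pgmap.altmap_valid = false;
            /*
             * A real caller also sets foo->pgmap.ref to a live percpu_ref,
             * as pmem.c does below with &q->q_usage_counter.
             */

            addr = devm_memremap_pages(dev, &foo->pgmap);
            if (IS_ERR(addr))
                    return PTR_ERR(addr);
            return 0;
    }

    /*
     * Because the pagemap is embedded, code that is handed only the
     * struct dev_pagemap can recover the containing structure.
     */
    static struct foo_device *to_foo(struct dev_pagemap *pgmap)
    {
            return container_of(pgmap, struct foo_device, pgmap);
    }

The diff below converts nvdimm_setup_pfn() and pmem_attach_disk() to exactly
this pattern, with the dev_pagemap embedded in struct pmem_device.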
Diffstat (limited to 'drivers/nvdimm')
-rw-r--r--  drivers/nvdimm/nd.h        |  9
-rw-r--r--  drivers/nvdimm/pfn_devs.c  | 27
-rw-r--r--  drivers/nvdimm/pmem.c      | 37
-rw-r--r--  drivers/nvdimm/pmem.h      |  1
4 files changed, 40 insertions(+), 34 deletions(-)
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index e958f3724c41..8d6375ee0fda 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -368,15 +368,14 @@ unsigned int pmem_sector_size(struct nd_namespace_common *ndns);
void nvdimm_badblocks_populate(struct nd_region *nd_region,
struct badblocks *bb, const struct resource *res);
#if IS_ENABLED(CONFIG_ND_CLAIM)
-struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
- struct resource *res, struct vmem_altmap *altmap);
+int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio);
void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
#else
-static inline struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
- struct resource *res, struct vmem_altmap *altmap)
+static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+ struct dev_pagemap *pgmap)
{
- return ERR_PTR(-ENXIO);
+ return -ENXIO;
}
static inline int devm_nsio_enable(struct device *dev,
struct nd_namespace_io *nsio)
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 2adada1a5855..f5c4e8c6e29d 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -542,9 +542,10 @@ static unsigned long init_altmap_reserve(resource_size_t base)
return reserve;
}
-static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
- struct resource *res, struct vmem_altmap *altmap)
+static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
{
+ struct resource *res = &pgmap->res;
+ struct vmem_altmap *altmap = &pgmap->altmap;
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
u64 offset = le64_to_cpu(pfn_sb->dataoff);
u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
@@ -561,11 +562,13 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
res->start += start_pad;
res->end -= end_trunc;
+ pgmap->type = MEMORY_DEVICE_HOST;
+
if (nd_pfn->mode == PFN_MODE_RAM) {
if (offset < SZ_8K)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
- altmap = NULL;
+ pgmap->altmap_valid = false;
} else if (nd_pfn->mode == PFN_MODE_PMEM) {
nd_pfn->npfns = PFN_SECTION_ALIGN_UP((resource_size(res)
- offset) / PAGE_SIZE);
@@ -577,10 +580,11 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
memcpy(altmap, &__altmap, sizeof(*altmap));
altmap->free = PHYS_PFN(offset - SZ_8K);
altmap->alloc = 0;
+ pgmap->altmap_valid = true;
} else
- return ERR_PTR(-ENXIO);
+ return -ENXIO;
- return altmap;
+ return 0;
}
static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys)
@@ -708,19 +712,18 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
* Determine the effective resource range and vmem_altmap from an nd_pfn
* instance.
*/
-struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
- struct resource *res, struct vmem_altmap *altmap)
+int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
{
int rc;
if (!nd_pfn->uuid || !nd_pfn->ndns)
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
rc = nd_pfn_init(nd_pfn);
if (rc)
- return ERR_PTR(rc);
+ return rc;
- /* we need a valid pfn_sb before we can init a vmem_altmap */
- return __nvdimm_setup_pfn(nd_pfn, res, altmap);
+ /* we need a valid pfn_sb before we can init a dev_pagemap */
+ return __nvdimm_setup_pfn(nd_pfn, pgmap);
}
EXPORT_SYMBOL_GPL(nvdimm_setup_pfn);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 7fbc5c5dc8e1..cf074b1ce219 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -298,34 +298,34 @@ static int pmem_attach_disk(struct device *dev,
{
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
struct nd_region *nd_region = to_nd_region(dev->parent);
- struct vmem_altmap __altmap, *altmap = NULL;
int nid = dev_to_node(dev), fua, wbc;
struct resource *res = &nsio->res;
+ struct resource bb_res;
struct nd_pfn *nd_pfn = NULL;
struct dax_device *dax_dev;
struct nd_pfn_sb *pfn_sb;
struct pmem_device *pmem;
- struct resource pfn_res;
struct request_queue *q;
struct device *gendev;
struct gendisk *disk;
void *addr;
+ int rc;
+
+ pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
+ if (!pmem)
+ return -ENOMEM;
/* while nsio_rw_bytes is active, parse a pfn info block if present */
if (is_nd_pfn(dev)) {
nd_pfn = to_nd_pfn(dev);
- altmap = nvdimm_setup_pfn(nd_pfn, &pfn_res, &__altmap);
- if (IS_ERR(altmap))
- return PTR_ERR(altmap);
+ rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
+ if (rc)
+ return rc;
}
/* we're attaching a block device, disable raw namespace access */
devm_nsio_disable(dev, nsio);
- pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
- if (!pmem)
- return -ENOMEM;
-
dev_set_drvdata(dev, pmem);
pmem->phys_addr = res->start;
pmem->size = resource_size(res);
@@ -350,19 +350,22 @@ static int pmem_attach_disk(struct device *dev,
return -ENOMEM;
pmem->pfn_flags = PFN_DEV;
+ pmem->pgmap.ref = &q->q_usage_counter;
if (is_nd_pfn(dev)) {
- addr = devm_memremap_pages(dev, &pfn_res, &q->q_usage_counter,
- altmap);
+ addr = devm_memremap_pages(dev, &pmem->pgmap);
pfn_sb = nd_pfn->pfn_sb;
pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
- pmem->pfn_pad = resource_size(res) - resource_size(&pfn_res);
+ pmem->pfn_pad = resource_size(res) -
+ resource_size(&pmem->pgmap.res);
pmem->pfn_flags |= PFN_MAP;
- res = &pfn_res; /* for badblocks populate */
- res->start += pmem->data_offset;
+ memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
+ bb_res.start += pmem->data_offset;
} else if (pmem_should_map_pages(dev)) {
- addr = devm_memremap_pages(dev, &nsio->res,
- &q->q_usage_counter, NULL);
+ memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res));
+ pmem->pgmap.altmap_valid = false;
+ addr = devm_memremap_pages(dev, &pmem->pgmap);
pmem->pfn_flags |= PFN_MAP;
+ memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
} else
addr = devm_memremap(dev, pmem->phys_addr,
pmem->size, ARCH_MEMREMAP_PMEM);
@@ -401,7 +404,7 @@ static int pmem_attach_disk(struct device *dev,
/ 512);
if (devm_init_badblocks(dev, &pmem->bb))
return -ENOMEM;
- nvdimm_badblocks_populate(nd_region, &pmem->bb, res);
+ nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_res);
disk->bb = &pmem->bb;
dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops);
diff --git a/drivers/nvdimm/pmem.h b/drivers/nvdimm/pmem.h
index 6a3cd2a10db6..a64ebc78b5df 100644
--- a/drivers/nvdimm/pmem.h
+++ b/drivers/nvdimm/pmem.h
@@ -22,6 +22,7 @@ struct pmem_device {
struct badblocks bb;
struct dax_device *dax_dev;
struct gendisk *disk;
+ struct dev_pagemap pgmap;
};
long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,