Diffstat (limited to 'drivers/nvdimm')
 drivers/nvdimm/Kconfig          |  19
 drivers/nvdimm/Makefile         |   1
 drivers/nvdimm/badrange.c       |   5
 drivers/nvdimm/btt.c            |  26
 drivers/nvdimm/btt_devs.c       |  24
 drivers/nvdimm/bus.c            |  99
 drivers/nvdimm/claim.c          |  27
 drivers/nvdimm/core.c           |  18
 drivers/nvdimm/dax_devs.c       |  14
 drivers/nvdimm/dimm.c           |   5
 drivers/nvdimm/dimm_devs.c      |  48
 drivers/nvdimm/e820.c           |   4
 drivers/nvdimm/label.c          |   3
 drivers/nvdimm/namespace_devs.c | 156
 drivers/nvdimm/nd-core.h        |   4
 drivers/nvdimm/nd.h             |  12
 drivers/nvdimm/nd_virtio.c      |  12
 drivers/nvdimm/of_pmem.c        |   7
 drivers/nvdimm/pfn_devs.c       |  72
 drivers/nvdimm/pmem.c           |  29
 drivers/nvdimm/pmem.h           |   4
 drivers/nvdimm/ramdax.c         | 282
 drivers/nvdimm/region.c         |  18
 drivers/nvdimm/region_devs.c    | 163
 drivers/nvdimm/security.c       |  14
 drivers/nvdimm/virtio_pmem.c    |  24
 26 files changed, 637 insertions(+), 453 deletions(-)
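
Much of the churn in the diff below is a mechanical conversion of open-coded nvdimm_bus_lock()/nvdimm_bus_unlock() (and device_lock()/device_unlock()) pairs to the scope-based guard()/scoped_guard() helpers from <linux/cleanup.h>, built on the DEFINE_GUARD(nvdimm_bus, ...) class the series adds to nd.h. A minimal sketch of that pattern follows; the guard definition is quoted from the patch, while example_show_old()/example_show_new() and the value they print are hypothetical, not code from this diff.

/*
 * Sketch of the guard() conversion pattern used throughout this diff.
 * The DEFINE_GUARD() below is the class the patch adds to
 * drivers/nvdimm/nd.h; the two show helpers are illustrative only.
 */
#include <linux/cleanup.h>
#include <linux/device.h>

/* Declared in drivers/nvdimm/nd.h */
void nvdimm_bus_lock(struct device *dev);
void nvdimm_bus_unlock(struct device *dev);

/* Quoted from this patch: a guard class wrapping the nvdimm bus lock. */
DEFINE_GUARD(nvdimm_bus, struct device *,
	     if (_T) nvdimm_bus_lock(_T), if (_T) nvdimm_bus_unlock(_T));

/* Before: explicit lock/unlock bracketing and a local for the result. */
static ssize_t example_show_old(struct device *dev, char *buf)
{
	ssize_t rc;

	nvdimm_bus_lock(dev);
	rc = sprintf(buf, "%d\n", 42);	/* 42 is a placeholder value */
	nvdimm_bus_unlock(dev);
	return rc;
}

/* After: the guard drops the lock automatically when the scope ends. */
static ssize_t example_show_new(struct device *dev, char *buf)
{
	guard(nvdimm_bus)(dev);
	return sprintf(buf, "%d\n", 42);	/* 42 is a placeholder value */
}

The same <linux/cleanup.h> family also supplies the __free(kfree)/__free(kvfree) annotations that replace the goto-based error unwinding in bus.c's __nd_ioctl() below.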
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig index fde3e17c836c..44ab929a1ad5 100644 --- a/drivers/nvdimm/Kconfig +++ b/drivers/nvdimm/Kconfig @@ -97,6 +97,25 @@ config OF_PMEM Select Y if unsure. +config RAMDAX + tristate "Support persistent memory interfaces on RAM carveouts" + depends on X86_PMEM_LEGACY || OF || COMPILE_TEST + default LIBNVDIMM + help + Allows creation of DAX devices on RAM carveouts. + + Memory ranges that are manually specified by the + 'memmap=nn[KMG]!ss[KMG]' kernel command line or defined by dummy + pmem-region device tree nodes would be managed by this driver as DIMM + devices with support for dynamic layout of namespaces. + The driver steals 128K in the end of the memmap range for the + namespace management. This allows supporting up to 509 namespaces + (see 'ndctl create-namespace --help'). + The driver should be force bound to e820_pmem or pmem-region platform + devices using 'driver_override' device attribute. + + Select N if unsure. + config NVDIMM_KEYS def_bool y depends on ENCRYPTED_KEYS diff --git a/drivers/nvdimm/Makefile b/drivers/nvdimm/Makefile index ba0296dca9db..8c268814936c 100644 --- a/drivers/nvdimm/Makefile +++ b/drivers/nvdimm/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_ND_BTT) += nd_btt.o obj-$(CONFIG_X86_PMEM_LEGACY) += nd_e820.o obj-$(CONFIG_OF_PMEM) += of_pmem.o obj-$(CONFIG_VIRTIO_PMEM) += virtio_pmem.o nd_virtio.o +obj-$(CONFIG_RAMDAX) += ramdax.o nd_pmem-y := pmem.o diff --git a/drivers/nvdimm/badrange.c b/drivers/nvdimm/badrange.c index a002ea6fdd84..36c626db459a 100644 --- a/drivers/nvdimm/badrange.c +++ b/drivers/nvdimm/badrange.c @@ -167,7 +167,7 @@ static void set_badblock(struct badblocks *bb, sector_t s, int num) dev_dbg(bb->dev, "Found a bad range (0x%llx, 0x%llx)\n", (u64) s * 512, (u64) num * 512); /* this isn't an error as the hardware will still throw an exception */ - if (badblocks_set(bb, s, num, 1)) + if (!badblocks_set(bb, s, num, 1)) dev_info_once(bb->dev, "%s: failed for sector %llx\n", __func__, (u64) s); } @@ -278,8 +278,7 @@ void nvdimm_badblocks_populate(struct nd_region *nd_region, } nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev); - nvdimm_bus_lock(&nvdimm_bus->dev); + guard(nvdimm_bus)(&nvdimm_bus->dev); badblocks_populate(&nvdimm_bus->badrange, bb, range); - nvdimm_bus_unlock(&nvdimm_bus->dev); } EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate); diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 1e5aedaf8c7b..a933db961ed7 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -751,7 +751,7 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size, u64 logsize, mapsize, datasize; u64 available = size; - arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL); + arena = kzalloc(sizeof(*arena), GFP_KERNEL); if (!arena) return NULL; arena->nd_btt = btt->nd_btt; @@ -978,7 +978,7 @@ static int btt_arena_write_layout(struct arena_info *arena) if (ret) return ret; - super = kzalloc(sizeof(struct btt_sb), GFP_NOIO); + super = kzalloc(sizeof(*super), GFP_NOIO); if (!super) return -ENOMEM; @@ -1478,12 +1478,12 @@ static void btt_submit_bio(struct bio *bio) bio_endio(bio); } -static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo) +static int btt_getgeo(struct gendisk *disk, struct hd_geometry *geo) { /* some standard values */ geo->heads = 1 << 6; geo->sectors = 1 << 5; - geo->cylinders = get_capacity(bd->bd_disk) >> 11; + geo->cylinders = get_capacity(disk) >> 11; return 0; } @@ -1501,9 +1501,15 @@ static int btt_blk_init(struct btt *btt) .logical_block_size = 
btt->sector_size, .max_hw_sectors = UINT_MAX, .max_integrity_segments = 1, + .features = BLK_FEAT_SYNCHRONOUS, }; int rc; + if (btt_meta_size(btt) && IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) { + lim.integrity.metadata_size = btt_meta_size(btt); + lim.integrity.tag_size = btt_meta_size(btt); + } + btt->btt_disk = blk_alloc_disk(&lim, NUMA_NO_NODE); if (IS_ERR(btt->btt_disk)) return PTR_ERR(btt->btt_disk); @@ -1513,17 +1519,6 @@ static int btt_blk_init(struct btt *btt) btt->btt_disk->fops = &btt_fops; btt->btt_disk->private_data = btt; - blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_disk->queue); - blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, btt->btt_disk->queue); - - if (btt_meta_size(btt) && IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) { - struct blk_integrity bi = { - .tuple_size = btt_meta_size(btt), - .tag_size = btt_meta_size(btt), - }; - blk_integrity_register(btt->btt_disk, &bi); - } - set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9); rc = device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL); if (rc) @@ -1721,6 +1716,7 @@ static void __exit nd_btt_exit(void) MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT); MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>"); +MODULE_DESCRIPTION("NVDIMM Block Translation Table"); MODULE_LICENSE("GPL v2"); module_init(nd_btt_init); module_exit(nd_btt_exit); diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c index 497fd434a6a1..b3279b86bbfd 100644 --- a/drivers/nvdimm/btt_devs.c +++ b/drivers/nvdimm/btt_devs.c @@ -50,14 +50,12 @@ static ssize_t sector_size_store(struct device *dev, struct nd_btt *nd_btt = to_nd_btt(dev); ssize_t rc; - device_lock(dev); - nvdimm_bus_lock(dev); + guard(device)(dev); + guard(nvdimm_bus)(dev); rc = nd_size_select_store(dev, buf, &nd_btt->lbasize, btt_lbasize_supported); dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? "" : "\n"); - nvdimm_bus_unlock(dev); - device_unlock(dev); return rc ? rc : len; } @@ -93,13 +91,10 @@ static ssize_t namespace_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nd_btt *nd_btt = to_nd_btt(dev); - ssize_t rc; - nvdimm_bus_lock(dev); - rc = sprintf(buf, "%s\n", nd_btt->ndns + guard(nvdimm_bus)(dev); + return sprintf(buf, "%s\n", nd_btt->ndns ? dev_name(&nd_btt->ndns->dev) : ""); - nvdimm_bus_unlock(dev); - return rc; } static ssize_t namespace_store(struct device *dev, @@ -108,13 +103,11 @@ static ssize_t namespace_store(struct device *dev, struct nd_btt *nd_btt = to_nd_btt(dev); ssize_t rc; - device_lock(dev); - nvdimm_bus_lock(dev); + guard(device)(dev); + guard(nvdimm_bus)(dev); rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len); dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? 
"" : "\n"); - nvdimm_bus_unlock(dev); - device_unlock(dev); return rc; } @@ -351,9 +344,8 @@ int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns) return -ENODEV; } - nvdimm_bus_lock(&ndns->dev); - btt_dev = __nd_btt_create(nd_region, 0, NULL, ndns); - nvdimm_bus_unlock(&ndns->dev); + scoped_guard(nvdimm_bus, &ndns->dev) + btt_dev = __nd_btt_create(nd_region, 0, NULL, ndns); if (!btt_dev) return -ENOMEM; btt_sb = devm_kzalloc(dev, sizeof(*btt_sb), GFP_KERNEL); diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 508aed017ddc..87178a53ff9c 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -5,7 +5,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/libnvdimm.h> #include <linux/sched/mm.h> -#include <linux/vmalloc.h> +#include <linux/slab.h> #include <linux/uaccess.h> #include <linux/module.h> #include <linux/blkdev.h> @@ -13,7 +13,6 @@ #include <linux/async.h> #include <linux/ndctl.h> #include <linux/sched.h> -#include <linux/slab.h> #include <linux/cpu.h> #include <linux/fs.h> #include <linux/io.h> @@ -25,9 +24,12 @@ int nvdimm_major; static int nvdimm_bus_major; -static struct class *nd_class; static DEFINE_IDA(nd_ida); +static const struct class nd_class = { + .name = "nd", +}; + static int to_nd_device_type(const struct device *dev) { if (is_nvdimm(dev)) @@ -61,17 +63,15 @@ static struct module *to_bus_provider(struct device *dev) static void nvdimm_bus_probe_start(struct nvdimm_bus *nvdimm_bus) { - nvdimm_bus_lock(&nvdimm_bus->dev); + guard(nvdimm_bus)(&nvdimm_bus->dev); nvdimm_bus->probe_active++; - nvdimm_bus_unlock(&nvdimm_bus->dev); } static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus) { - nvdimm_bus_lock(&nvdimm_bus->dev); + guard(nvdimm_bus)(&nvdimm_bus->dev); if (--nvdimm_bus->probe_active == 0) wake_up(&nvdimm_bus->wait); - nvdimm_bus_unlock(&nvdimm_bus->dev); } static int nvdimm_bus_probe(struct device *dev) @@ -269,7 +269,7 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys, } EXPORT_SYMBOL_GPL(nvdimm_clear_poison); -static int nvdimm_bus_match(struct device *dev, struct device_driver *drv); +static int nvdimm_bus_match(struct device *dev, const struct device_driver *drv); static const struct bus_type nvdimm_bus_type = { .name = "nd", @@ -465,9 +465,9 @@ static struct nd_device_driver nd_bus_driver = { }, }; -static int nvdimm_bus_match(struct device *dev, struct device_driver *drv) +static int nvdimm_bus_match(struct device *dev, const struct device_driver *drv) { - struct nd_device_driver *nd_drv = to_nd_device_driver(drv); + const struct nd_device_driver *nd_drv = to_nd_device_driver(drv); if (is_nvdimm_bus(dev) && nd_drv == &nd_bus_driver) return true; @@ -742,7 +742,7 @@ int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus) device_initialize(dev); lockdep_set_class(&dev->mutex, &nvdimm_ndctl_key); device_set_pm_not_required(dev); - dev->class = nd_class; + dev->class = &nd_class; dev->parent = &nvdimm_bus->dev; dev->devt = devt; dev->release = ndctl_release; @@ -765,7 +765,7 @@ err: void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus) { - device_destroy(nd_class, MKDEV(nvdimm_bus_major, nvdimm_bus->id)); + device_destroy(&nd_class, MKDEV(nvdimm_bus_major, nvdimm_bus->id)); } static const struct nd_cmd_desc __nd_cmd_dimm_descs[] = { @@ -1028,14 +1028,12 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, unsigned int cmd = _IOC_NR(ioctl_cmd); struct device *dev = &nvdimm_bus->dev; void __user *p = (void __user *) arg; - char *out_env = NULL, *in_env = 
NULL; const char *cmd_name, *dimm_name; u32 in_len = 0, out_len = 0; unsigned int func = cmd; unsigned long cmd_mask; struct nd_cmd_pkg pkg; int rc, i, cmd_rc; - void *buf = NULL; u64 buf_len = 0; if (nvdimm) { @@ -1094,7 +1092,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, } /* process an input envelope */ - in_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL); + char *in_env __free(kfree) = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL); if (!in_env) return -ENOMEM; for (i = 0; i < desc->in_num; i++) { @@ -1104,17 +1102,14 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, if (in_size == UINT_MAX) { dev_err(dev, "%s:%s unknown input size cmd: %s field: %d\n", __func__, dimm_name, cmd_name, i); - rc = -ENXIO; - goto out; + return -ENXIO; } if (in_len < ND_CMD_MAX_ENVELOPE) copy = min_t(u32, ND_CMD_MAX_ENVELOPE - in_len, in_size); else copy = 0; - if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) { - rc = -EFAULT; - goto out; - } + if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) + return -EFAULT; in_len += in_size; } @@ -1126,11 +1121,9 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, } /* process an output envelope */ - out_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL); - if (!out_env) { - rc = -ENOMEM; - goto out; - } + char *out_env __free(kfree) = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL); + if (!out_env) + return -ENOMEM; for (i = 0; i < desc->out_num; i++) { u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, @@ -1140,8 +1133,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, if (out_size == UINT_MAX) { dev_dbg(dev, "%s unknown output size cmd: %s field: %d\n", dimm_name, cmd_name, i); - rc = -EFAULT; - goto out; + return -EFAULT; } if (out_len < ND_CMD_MAX_ENVELOPE) copy = min_t(u32, ND_CMD_MAX_ENVELOPE - out_len, out_size); @@ -1149,8 +1141,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, copy = 0; if (copy && copy_from_user(&out_env[out_len], p + in_len + out_len, copy)) { - rc = -EFAULT; - goto out; + return -EFAULT; } out_len += out_size; } @@ -1159,30 +1150,25 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, if (buf_len > ND_IOCTL_MAX_BUFLEN) { dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name, cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN); - rc = -EINVAL; - goto out; + return -EINVAL; } - buf = vmalloc(buf_len); - if (!buf) { - rc = -ENOMEM; - goto out; - } + void *buf __free(kvfree) = kvzalloc(buf_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; - if (copy_from_user(buf, p, buf_len)) { - rc = -EFAULT; - goto out; - } + if (copy_from_user(buf, p, buf_len)) + return -EFAULT; - device_lock(dev); - nvdimm_bus_lock(dev); + guard(device)(dev); + guard(nvdimm_bus)(dev); rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf); if (rc) - goto out_unlock; + return rc; rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, &cmd_rc); if (rc < 0) - goto out_unlock; + return rc; if (!nvdimm && cmd == ND_CMD_CLEAR_ERROR && cmd_rc >= 0) { struct nd_cmd_clear_error *clear_err = buf; @@ -1192,16 +1178,9 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, } if (copy_to_user(p, buf, buf_len)) - rc = -EFAULT; + return -EFAULT; -out_unlock: - nvdimm_bus_unlock(dev); - device_unlock(dev); -out: - kfree(in_env); - kfree(out_env); - vfree(buf); - return rc; + return 0; } enum nd_ioctl_mode { @@ -1209,7 +1188,7 @@ enum nd_ioctl_mode { DIMM_IOCTL, }; -static int 
match_dimm(struct device *dev, void *data) +static int match_dimm(struct device *dev, const void *data) { long id = (long) data; @@ -1320,11 +1299,9 @@ int __init nvdimm_bus_init(void) goto err_dimm_chrdev; nvdimm_major = rc; - nd_class = class_create("nd"); - if (IS_ERR(nd_class)) { - rc = PTR_ERR(nd_class); + rc = class_register(&nd_class); + if (rc) goto err_class; - } rc = driver_register(&nd_bus_driver.drv); if (rc) @@ -1333,7 +1310,7 @@ int __init nvdimm_bus_init(void) return 0; err_nd_bus: - class_destroy(nd_class); + class_unregister(&nd_class); err_class: unregister_chrdev(nvdimm_major, "dimmctl"); err_dimm_chrdev: @@ -1347,7 +1324,7 @@ int __init nvdimm_bus_init(void) void nvdimm_bus_exit(void) { driver_unregister(&nd_bus_driver.drv); - class_destroy(nd_class); + class_unregister(&nd_class); unregister_chrdev(nvdimm_bus_major, "ndctl"); unregister_chrdev(nvdimm_major, "dimmctl"); bus_unregister(&nvdimm_bus_type); diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c index 030dbde6b088..309cd2cddb0e 100644 --- a/drivers/nvdimm/claim.c +++ b/drivers/nvdimm/claim.c @@ -34,11 +34,10 @@ void nd_detach_ndns(struct device *dev, if (!ndns) return; - get_device(&ndns->dev); - nvdimm_bus_lock(&ndns->dev); + + struct device *ndev __free(put_device) = get_device(&ndns->dev); + guard(nvdimm_bus)(ndev); __nd_detach_ndns(dev, _ndns); - nvdimm_bus_unlock(&ndns->dev); - put_device(&ndns->dev); } bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach, @@ -56,24 +55,6 @@ bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach, return true; } -bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach, - struct nd_namespace_common **_ndns) -{ - bool claimed; - - nvdimm_bus_lock(&attach->dev); - claimed = __nd_attach_ndns(dev, attach, _ndns); - nvdimm_bus_unlock(&attach->dev); - return claimed; -} - -static int namespace_match(struct device *dev, void *data) -{ - char *name = data; - - return strcmp(name, dev_name(dev)) == 0; -} - static bool is_idle(struct device *dev, struct nd_namespace_common *ndns) { struct nd_region *nd_region = to_nd_region(dev->parent); @@ -168,7 +149,7 @@ ssize_t nd_namespace_store(struct device *dev, goto out; } - found = device_find_child(dev->parent, name, namespace_match); + found = device_find_child_by_name(dev->parent, name); if (!found) { dev_dbg(dev, "'%s' not found under %s\n", name, dev_name(dev->parent)); diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c index 2023a661bbb0..5ba204113fe1 100644 --- a/drivers/nvdimm/core.c +++ b/drivers/nvdimm/core.c @@ -141,9 +141,8 @@ static void nvdimm_map_put(void *data) struct nvdimm_map *nvdimm_map = data; struct nvdimm_bus *nvdimm_bus = nvdimm_map->nvdimm_bus; - nvdimm_bus_lock(&nvdimm_bus->dev); + guard(nvdimm_bus)(&nvdimm_bus->dev); kref_put(&nvdimm_map->kref, nvdimm_map_release); - nvdimm_bus_unlock(&nvdimm_bus->dev); } /** @@ -158,13 +157,13 @@ void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset, { struct nvdimm_map *nvdimm_map; - nvdimm_bus_lock(dev); - nvdimm_map = find_nvdimm_map(dev, offset); - if (!nvdimm_map) - nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags); - else - kref_get(&nvdimm_map->kref); - nvdimm_bus_unlock(dev); + scoped_guard(nvdimm_bus, dev) { + nvdimm_map = find_nvdimm_map(dev, offset); + if (!nvdimm_map) + nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags); + else + kref_get(&nvdimm_map->kref); + } if (!nvdimm_map) return NULL; @@ -540,6 +539,7 @@ static __exit void libnvdimm_exit(void) 
nvdimm_devs_exit(); } +MODULE_DESCRIPTION("NVDIMM (Non-Volatile Memory Device) core"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Intel Corporation"); subsys_initcall(libnvdimm_init); diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c index 6b4922de3047..ba4c409ede65 100644 --- a/drivers/nvdimm/dax_devs.c +++ b/drivers/nvdimm/dax_devs.c @@ -104,14 +104,14 @@ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns) return -ENODEV; } - nvdimm_bus_lock(&ndns->dev); - nd_dax = nd_dax_alloc(nd_region); - nd_pfn = &nd_dax->nd_pfn; - dax_dev = nd_pfn_devinit(nd_pfn, ndns); - nvdimm_bus_unlock(&ndns->dev); - if (!dax_dev) - return -ENOMEM; + scoped_guard(nvdimm_bus, &ndns->dev) { + nd_dax = nd_dax_alloc(nd_region); + dax_dev = nd_dax_devinit(nd_dax, ndns); + if (!dax_dev) + return -ENOMEM; + } pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL); + nd_pfn = &nd_dax->nd_pfn; nd_pfn->pfn_sb = pfn_sb; rc = nd_pfn_validate(nd_pfn, DAX_SIG); dev_dbg(dev, "dax: %s\n", rc == 0 ? dev_name(dax_dev) : "<none>"); diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c index 91d9163ee303..2f6c26cc6a3e 100644 --- a/drivers/nvdimm/dimm.c +++ b/drivers/nvdimm/dimm.c @@ -117,9 +117,8 @@ static void nvdimm_remove(struct device *dev) { struct nvdimm_drvdata *ndd = dev_get_drvdata(dev); - nvdimm_bus_lock(dev); - dev_set_drvdata(dev, NULL); - nvdimm_bus_unlock(dev); + scoped_guard(nvdimm_bus, dev) + dev_set_drvdata(dev, NULL); put_ndd(ndd); } diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index 21498d461fde..e1349ef5f8fd 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -226,10 +226,10 @@ void nvdimm_drvdata_release(struct kref *kref) struct resource *res, *_r; dev_dbg(dev, "trace\n"); - nvdimm_bus_lock(dev); - for_each_dpa_resource_safe(ndd, res, _r) - nvdimm_free_dpa(ndd, res); - nvdimm_bus_unlock(dev); + scoped_guard(nvdimm_bus, dev) { + for_each_dpa_resource_safe(ndd, res, _r) + nvdimm_free_dpa(ndd, res); + } kvfree(ndd->data); kfree(ndd); @@ -319,23 +319,20 @@ static DEVICE_ATTR_RO(state); static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf) { struct device *dev; - ssize_t rc; u32 nfree; if (!ndd) return -ENXIO; dev = ndd->dev; - nvdimm_bus_lock(dev); + guard(nvdimm_bus)(dev); nfree = nd_label_nfree(ndd); if (nfree - 1 > nfree) { dev_WARN_ONCE(dev, 1, "we ate our last label?\n"); nfree = 0; } else nfree--; - rc = sprintf(buf, "%d\n", nfree); - nvdimm_bus_unlock(dev); - return rc; + return sprintf(buf, "%d\n", nfree); } static ssize_t available_slots_show(struct device *dev, @@ -388,21 +385,15 @@ static ssize_t security_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { - ssize_t rc; - /* * Require all userspace triggered security management to be * done while probing is idle and the DIMM is not in active use * in any region. 
*/ - device_lock(dev); - nvdimm_bus_lock(dev); + guard(device)(dev); + guard(nvdimm_bus)(dev); wait_nvdimm_bus_probe_idle(dev); - rc = nvdimm_security_store(dev, buf, len); - nvdimm_bus_unlock(dev); - device_unlock(dev); - - return rc; + return nvdimm_security_store(dev, buf, len); } static DEVICE_ATTR_RW(security); @@ -454,9 +445,8 @@ static ssize_t result_show(struct device *dev, struct device_attribute *attr, ch if (!nvdimm->fw_ops) return -EOPNOTSUPP; - nvdimm_bus_lock(dev); + guard(nvdimm_bus)(dev); result = nvdimm->fw_ops->activate_result(nvdimm); - nvdimm_bus_unlock(dev); switch (result) { case NVDIMM_FWA_RESULT_NONE: @@ -483,9 +473,8 @@ static ssize_t activate_show(struct device *dev, struct device_attribute *attr, if (!nvdimm->fw_ops) return -EOPNOTSUPP; - nvdimm_bus_lock(dev); + guard(nvdimm_bus)(dev); state = nvdimm->fw_ops->activate_state(nvdimm); - nvdimm_bus_unlock(dev); switch (state) { case NVDIMM_FWA_IDLE: @@ -516,9 +505,8 @@ static ssize_t activate_store(struct device *dev, struct device_attribute *attr, else return -EINVAL; - nvdimm_bus_lock(dev); + guard(nvdimm_bus)(dev); rc = nvdimm->fw_ops->arm(nvdimm, arg); - nvdimm_bus_unlock(dev); if (rc < 0) return rc; @@ -545,9 +533,8 @@ static umode_t nvdimm_firmware_visible(struct kobject *kobj, struct attribute *a if (!nvdimm->fw_ops) return 0; - nvdimm_bus_lock(dev); + guard(nvdimm_bus)(dev); cap = nd_desc->fw_ops->capability(nd_desc); - nvdimm_bus_unlock(dev); if (cap < NVDIMM_FWA_CAP_QUIESCE) return 0; @@ -641,11 +628,10 @@ void nvdimm_delete(struct nvdimm *nvdimm) bool dev_put = false; /* We are shutting down. Make state frozen artificially. */ - nvdimm_bus_lock(dev); - set_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags); - if (test_and_clear_bit(NDD_WORK_PENDING, &nvdimm->flags)) - dev_put = true; - nvdimm_bus_unlock(dev); + scoped_guard(nvdimm_bus, dev) { + set_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags); + dev_put = test_and_clear_bit(NDD_WORK_PENDING, &nvdimm->flags); + } cancel_delayed_work_sync(&nvdimm->dwork); if (dev_put) put_device(dev); diff --git a/drivers/nvdimm/e820.c b/drivers/nvdimm/e820.c index 4cd18be9d0e9..41c67dfa8015 100644 --- a/drivers/nvdimm/e820.c +++ b/drivers/nvdimm/e820.c @@ -9,12 +9,11 @@ #include <linux/module.h> #include <linux/numa.h> -static int e820_pmem_remove(struct platform_device *pdev) +static void e820_pmem_remove(struct platform_device *pdev) { struct nvdimm_bus *nvdimm_bus = platform_get_drvdata(pdev); nvdimm_bus_unregister(nvdimm_bus); - return 0; } static int e820_register_one(struct resource *res, void *data) @@ -69,5 +68,6 @@ static struct platform_driver e820_pmem_driver = { module_platform_driver(e820_pmem_driver); MODULE_ALIAS("platform:e820_pmem*"); +MODULE_DESCRIPTION("NVDIMM support for e820 type-12 memory"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Intel Corporation"); diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c index 082253a3a956..04f4a049599a 100644 --- a/drivers/nvdimm/label.c +++ b/drivers/nvdimm/label.c @@ -442,7 +442,8 @@ int nd_label_data_init(struct nvdimm_drvdata *ndd) if (ndd->data) return 0; - if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) { + if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0 || + ndd->nsarea.config_size == 0) { dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n", ndd->nsarea.max_xfer, ndd->nsarea.config_size); return -ENXIO; diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index d6d558f94d6b..a5edcacfe46d 100644 --- a/drivers/nvdimm/namespace_devs.c +++ 
b/drivers/nvdimm/namespace_devs.c @@ -264,15 +264,13 @@ static ssize_t alt_name_store(struct device *dev, struct nd_region *nd_region = to_nd_region(dev->parent); ssize_t rc; - device_lock(dev); - nvdimm_bus_lock(dev); + guard(device)(dev); + guard(nvdimm_bus)(dev); wait_nvdimm_bus_probe_idle(dev); rc = __alt_name_store(dev, buf, len); if (rc >= 0) rc = nd_namespace_label_update(nd_region, dev); dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc); - nvdimm_bus_unlock(dev); - device_unlock(dev); return rc < 0 ? rc : len; } @@ -849,8 +847,8 @@ static ssize_t size_store(struct device *dev, if (rc) return rc; - device_lock(dev); - nvdimm_bus_lock(dev); + guard(device)(dev); + guard(nvdimm_bus)(dev); wait_nvdimm_bus_probe_idle(dev); rc = __size_store(dev, val); if (rc >= 0) @@ -866,9 +864,6 @@ static ssize_t size_store(struct device *dev, dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc); - nvdimm_bus_unlock(dev); - device_unlock(dev); - return rc < 0 ? rc : len; } @@ -891,13 +886,8 @@ resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns) resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns) { - resource_size_t size; - - nvdimm_bus_lock(&ndns->dev); - size = __nvdimm_namespace_capacity(ndns); - nvdimm_bus_unlock(&ndns->dev); - - return size; + guard(nvdimm_bus)(&ndns->dev); + return __nvdimm_namespace_capacity(ndns); } EXPORT_SYMBOL(nvdimm_namespace_capacity); @@ -1044,8 +1034,8 @@ static ssize_t uuid_store(struct device *dev, } else return -ENXIO; - device_lock(dev); - nvdimm_bus_lock(dev); + guard(device)(dev); + guard(nvdimm_bus)(dev); wait_nvdimm_bus_probe_idle(dev); if (to_ndns(dev)->claim) rc = -EBUSY; @@ -1059,8 +1049,6 @@ static ssize_t uuid_store(struct device *dev, kfree(uuid); dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? "" : "\n"); - nvdimm_bus_unlock(dev); - device_unlock(dev); return rc < 0 ? rc : len; } @@ -1119,20 +1107,30 @@ static ssize_t sector_size_store(struct device *dev, } else return -ENXIO; - device_lock(dev); - nvdimm_bus_lock(dev); - if (to_ndns(dev)->claim) - rc = -EBUSY; - if (rc >= 0) - rc = nd_size_select_store(dev, buf, lbasize, supported); - if (rc >= 0) - rc = nd_namespace_label_update(nd_region, dev); - dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote", + guard(device)(dev); + guard(nvdimm_bus)(dev); + if (to_ndns(dev)->claim) { + dev_dbg(dev, "namespace %s already claimed\n", dev_name(dev)); + return -EBUSY; + } + + rc = nd_size_select_store(dev, buf, lbasize, supported); + if (rc < 0) { + dev_dbg(dev, "size select fail: %zd tried: %s%s", rc, buf, buf[len - 1] == '\n' ? "" : "\n"); - nvdimm_bus_unlock(dev); - device_unlock(dev); + return rc; + } - return rc ? rc : len; + rc = nd_namespace_label_update(nd_region, dev); + if (rc < 0) { + dev_dbg(dev, "label update fail: %zd tried: %s%s", + rc, buf, buf[len - 1] == '\n' ? "" : "\n"); + return rc; + } + + dev_dbg(dev, "wrote: %s%s", buf, buf[len - 1] == '\n' ? 
"" : "\n"); + + return len; } static DEVICE_ATTR_RW(sector_size); @@ -1145,7 +1143,7 @@ static ssize_t dpa_extents_show(struct device *dev, int count = 0, i; u32 flags = 0; - nvdimm_bus_lock(dev); + guard(nvdimm_bus)(dev); if (is_namespace_pmem(dev)) { struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev); @@ -1154,7 +1152,7 @@ static ssize_t dpa_extents_show(struct device *dev, } if (!uuid) - goto out; + return sprintf(buf, "%d\n", count); nd_label_gen_id(&label_id, uuid, flags); for (i = 0; i < nd_region->ndr_mappings; i++) { @@ -1166,8 +1164,6 @@ static ssize_t dpa_extents_show(struct device *dev, if (strcmp(res->name, label_id.id) == 0) count++; } - out: - nvdimm_bus_unlock(dev); return sprintf(buf, "%d\n", count); } @@ -1279,15 +1275,13 @@ static ssize_t holder_class_store(struct device *dev, struct nd_region *nd_region = to_nd_region(dev->parent); int rc; - device_lock(dev); - nvdimm_bus_lock(dev); + guard(device)(dev); + guard(nvdimm_bus)(dev); wait_nvdimm_bus_probe_idle(dev); rc = __holder_class_store(dev, buf); if (rc >= 0) rc = nd_namespace_label_update(nd_region, dev); dev_dbg(dev, "%s(%d)\n", rc < 0 ? "fail " : "", rc); - nvdimm_bus_unlock(dev); - device_unlock(dev); return rc < 0 ? rc : len; } @@ -1612,9 +1606,6 @@ static int select_pmem_id(struct nd_region *nd_region, const uuid_t *pmem_id) { int i; - if (!pmem_id) - return -ENODEV; - for (i = 0; i < nd_region->ndr_mappings; i++) { struct nd_mapping *nd_mapping = &nd_region->mapping[i]; struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); @@ -1790,9 +1781,6 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region, case -EINVAL: dev_dbg(&nd_region->dev, "invalid label(s)\n"); break; - case -ENODEV: - dev_dbg(&nd_region->dev, "label not found\n"); - break; default: dev_dbg(&nd_region->dev, "unexpected err: %d\n", rc); break; @@ -1937,12 +1925,16 @@ static int cmp_dpa(const void *a, const void *b) static struct device **scan_labels(struct nd_region *nd_region) { int i, count = 0; - struct device *dev, **devs = NULL; + struct device *dev, **devs; struct nd_label_ent *label_ent, *e; struct nd_mapping *nd_mapping = &nd_region->mapping[0]; struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1; + devs = kcalloc(2, sizeof(dev), GFP_KERNEL); + if (!devs) + return NULL; + /* "safe" because create_namespace_pmem() might list_move() label_ent */ list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) { struct nd_namespace_label *nd_label = label_ent->label; @@ -1961,12 +1953,14 @@ static struct device **scan_labels(struct nd_region *nd_region) goto err; if (i < count) continue; - __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL); - if (!__devs) - goto err; - memcpy(__devs, devs, sizeof(dev) * count); - kfree(devs); - devs = __devs; + if (count) { + __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL); + if (!__devs) + goto err; + memcpy(__devs, devs, sizeof(dev) * count); + kfree(devs); + devs = __devs; + } dev = create_namespace_pmem(nd_region, nd_mapping, nd_label); if (IS_ERR(dev)) { @@ -1974,9 +1968,6 @@ static struct device **scan_labels(struct nd_region *nd_region) case -EAGAIN: /* skip invalid labels */ continue; - case -ENODEV: - /* fallthrough to seed creation */ - break; default: goto err; } @@ -1986,18 +1977,13 @@ static struct device **scan_labels(struct nd_region *nd_region) } dev_dbg(&nd_region->dev, "discovered %d namespace%s\n", count, - count == 1 ? 
"" : "s"); + str_plural(count)); if (count == 0) { struct nd_namespace_pmem *nspm; /* Publish a zero-sized namespace for userspace to configure. */ nd_mapping_free_labels(nd_mapping); - - devs = kcalloc(2, sizeof(dev), GFP_KERNEL); - if (!devs) - goto err; - nspm = kzalloc(sizeof(*nspm), GFP_KERNEL); if (!nspm) goto err; @@ -2036,11 +2022,10 @@ static struct device **scan_labels(struct nd_region *nd_region) return devs; err: - if (devs) { - for (i = 0; devs[i]; i++) - namespace_pmem_release(devs[i]); - kfree(devs); - } + for (i = 0; devs[i]; i++) + namespace_pmem_release(devs[i]); + kfree(devs); + return NULL; } @@ -2161,31 +2146,38 @@ out: nd_region); } -int nd_region_register_namespaces(struct nd_region *nd_region, int *err) +static int create_relevant_namespaces(struct nd_region *nd_region, int *type, + struct device ***devs) { - struct device **devs = NULL; - int i, rc = 0, type; + int rc; - *err = 0; - nvdimm_bus_lock(&nd_region->dev); + guard(nvdimm_bus)(&nd_region->dev); rc = init_active_labels(nd_region); - if (rc) { - nvdimm_bus_unlock(&nd_region->dev); + if (rc) return rc; - } - type = nd_region_to_nstype(nd_region); - switch (type) { + *type = nd_region_to_nstype(nd_region); + switch (*type) { case ND_DEVICE_NAMESPACE_IO: - devs = create_namespace_io(nd_region); + *devs = create_namespace_io(nd_region); break; case ND_DEVICE_NAMESPACE_PMEM: - devs = create_namespaces(nd_region); - break; - default: + *devs = create_namespaces(nd_region); break; } - nvdimm_bus_unlock(&nd_region->dev); + + return 0; +} + +int nd_region_register_namespaces(struct nd_region *nd_region, int *err) +{ + struct device **devs = NULL; + int i, rc = 0, type; + + *err = 0; + rc = create_relevant_namespaces(nd_region, &type, &devs); + if (rc) + return rc; if (!devs) return -ENODEV; diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h index 86976a9e8a15..bfc6bfeb6e24 100644 --- a/drivers/nvdimm/nd-core.h +++ b/drivers/nvdimm/nd-core.h @@ -127,8 +127,6 @@ resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region); resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region, struct nd_mapping *nd_mapping); resource_size_t nd_region_available_dpa(struct nd_region *nd_region); -int nd_region_conflict(struct nd_region *nd_region, resource_size_t start, - resource_size_t size); resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd, struct nd_label_id *label_id); int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd); @@ -136,8 +134,6 @@ void get_ndd(struct nvdimm_drvdata *ndd); resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns); void nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns); void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns); -bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach, - struct nd_namespace_common **_ndns); bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach, struct nd_namespace_common **_ndns); ssize_t nd_namespace_store(struct device *dev, diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 2dbb1dca17b5..b199eea3260e 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -600,6 +600,13 @@ struct nd_dax *to_nd_dax(struct device *dev); int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns); bool is_nd_dax(const struct device *dev); struct device *nd_dax_create(struct nd_region *nd_region); +static inline struct device *nd_dax_devinit(struct nd_dax *nd_dax, + struct nd_namespace_common *ndns) +{ + if 
(!nd_dax) + return NULL; + return nd_pfn_devinit(&nd_dax->nd_pfn, ndns); +} #else static inline int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns) @@ -625,6 +632,9 @@ u64 nd_region_interleave_set_cookie(struct nd_region *nd_region, u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region); void nvdimm_bus_lock(struct device *dev); void nvdimm_bus_unlock(struct device *dev); +DEFINE_GUARD(nvdimm_bus, struct device *, + if (_T) nvdimm_bus_lock(_T), if (_T) nvdimm_bus_unlock(_T)); + bool is_nvdimm_bus_locked(struct device *dev); void nvdimm_check_and_set_ro(struct gendisk *disk); void nvdimm_drvdata_release(struct kref *kref); @@ -666,7 +676,7 @@ static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector, { if (bb->count) { sector_t first_bad; - int num_bad; + sector_t num_bad; return !!badblocks_check(bb, sector, len / 512, &first_bad, &num_bad); diff --git a/drivers/nvdimm/nd_virtio.c b/drivers/nvdimm/nd_virtio.c index 1f8c667c6f1e..c3f07be4aa22 100644 --- a/drivers/nvdimm/nd_virtio.c +++ b/drivers/nvdimm/nd_virtio.c @@ -44,6 +44,15 @@ static int virtio_pmem_flush(struct nd_region *nd_region) unsigned long flags; int err, err1; + /* + * Don't bother to submit the request to the device if the device is + * not activated. + */ + if (vdev->config->get_status(vdev) & VIRTIO_CONFIG_S_NEEDS_RESET) { + dev_info(&vdev->dev, "virtio pmem device needs a reset\n"); + return -EIO; + } + might_sleep(); req_data = kmalloc(sizeof(*req_data), GFP_KERNEL); if (!req_data) @@ -88,7 +97,7 @@ static int virtio_pmem_flush(struct nd_region *nd_region) dev_info(&vdev->dev, "failed to send command to virtio pmem device\n"); err = -EIO; } else { - /* A host repsonse results in "host_ack" getting called */ + /* A host response results in "host_ack" getting called */ wait_event(req_data->host_acked, req_data->done); err = le32_to_cpu(req_data->resp.ret); } @@ -123,4 +132,5 @@ int async_pmem_flush(struct nd_region *nd_region, struct bio *bio) return 0; }; EXPORT_SYMBOL_GPL(async_pmem_flush); +MODULE_DESCRIPTION("Virtio Persistent Memory Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c index d3fca0ab6290..68bddab3fb46 100644 --- a/drivers/nvdimm/of_pmem.c +++ b/drivers/nvdimm/of_pmem.c @@ -47,7 +47,7 @@ static int of_pmem_region_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, priv); - is_volatile = !!of_find_property(np, "volatile", NULL); + is_volatile = of_property_read_bool(np, "volatile"); dev_dbg(&pdev->dev, "Registering %s regions from %pOF\n", is_volatile ? 
"volatile" : "non-volatile", np); @@ -84,14 +84,12 @@ static int of_pmem_region_probe(struct platform_device *pdev) return 0; } -static int of_pmem_region_remove(struct platform_device *pdev) +static void of_pmem_region_remove(struct platform_device *pdev) { struct of_pmem_private *priv = platform_get_drvdata(pdev); nvdimm_bus_unregister(priv->bus); kfree(priv); - - return 0; } static const struct of_device_id of_pmem_region_match[] = { @@ -111,5 +109,6 @@ static struct platform_driver of_pmem_region_driver = { module_platform_driver(of_pmem_region_driver); MODULE_DEVICE_TABLE(of, of_pmem_region_match); +MODULE_DESCRIPTION("NVDIMM Device Tree support"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("IBM Corporation"); diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index 586348125b61..42b172fc5576 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -56,30 +56,26 @@ static ssize_t mode_store(struct device *dev, { struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); ssize_t rc = 0; + size_t n = len - 1; - device_lock(dev); - nvdimm_bus_lock(dev); + guard(device)(dev); + guard(nvdimm_bus)(dev); if (dev->driver) - rc = -EBUSY; - else { - size_t n = len - 1; - - if (strncmp(buf, "pmem\n", n) == 0 - || strncmp(buf, "pmem", n) == 0) { - nd_pfn->mode = PFN_MODE_PMEM; - } else if (strncmp(buf, "ram\n", n) == 0 - || strncmp(buf, "ram", n) == 0) - nd_pfn->mode = PFN_MODE_RAM; - else if (strncmp(buf, "none\n", n) == 0 - || strncmp(buf, "none", n) == 0) - nd_pfn->mode = PFN_MODE_NONE; - else - rc = -EINVAL; - } + return -EBUSY; + + if (strncmp(buf, "pmem\n", n) == 0 + || strncmp(buf, "pmem", n) == 0) { + nd_pfn->mode = PFN_MODE_PMEM; + } else if (strncmp(buf, "ram\n", n) == 0 + || strncmp(buf, "ram", n) == 0) + nd_pfn->mode = PFN_MODE_RAM; + else if (strncmp(buf, "none\n", n) == 0 + || strncmp(buf, "none", n) == 0) + nd_pfn->mode = PFN_MODE_NONE; + else + rc = -EINVAL; dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? "" : "\n"); - nvdimm_bus_unlock(dev); - device_unlock(dev); return rc ? rc : len; } @@ -125,14 +121,12 @@ static ssize_t align_store(struct device *dev, unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, }; ssize_t rc; - device_lock(dev); - nvdimm_bus_lock(dev); + guard(device)(dev); + guard(nvdimm_bus)(dev); rc = nd_size_select_store(dev, buf, &nd_pfn->align, nd_pfn_supported_alignments(aligns)); dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? "" : "\n"); - nvdimm_bus_unlock(dev); - device_unlock(dev); return rc ? rc : len; } @@ -168,13 +162,10 @@ static ssize_t namespace_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); - ssize_t rc; - nvdimm_bus_lock(dev); - rc = sprintf(buf, "%s\n", nd_pfn->ndns + guard(nvdimm_bus)(dev); + return sprintf(buf, "%s\n", nd_pfn->ndns ? dev_name(&nd_pfn->ndns->dev) : ""); - nvdimm_bus_unlock(dev); - return rc; } static ssize_t namespace_store(struct device *dev, @@ -183,13 +174,11 @@ static ssize_t namespace_store(struct device *dev, struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); ssize_t rc; - device_lock(dev); - nvdimm_bus_lock(dev); + guard(device)(dev); + guard(nvdimm_bus)(dev); rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len); dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? 
"" : "\n"); - nvdimm_bus_unlock(dev); - device_unlock(dev); return rc; } @@ -367,9 +356,10 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn) struct nd_namespace_common *ndns = nd_pfn->ndns; void *zero_page = page_address(ZERO_PAGE(0)); struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; - int num_bad, meta_num, rc, bb_present; + int meta_num, rc, bb_present; sector_t first_bad, meta_start; struct nd_namespace_io *nsio; + sector_t num_bad; if (nd_pfn->mode != PFN_MODE_PMEM) return 0; @@ -394,7 +384,7 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn) bb_present = badblocks_check(&nd_region->bb, meta_start, meta_num, &first_bad, &num_bad); if (bb_present) { - dev_dbg(&nd_pfn->dev, "meta: %x badblocks at %llx\n", + dev_dbg(&nd_pfn->dev, "meta: %llx badblocks at %llx\n", num_bad, first_bad); nsoff = ALIGN_DOWN((nd_region->ndr_start + (first_bad << 9)) - nsio->res.start, @@ -413,7 +403,7 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn) } if (rc) { dev_err(&nd_pfn->dev, - "error clearing %x badblocks at %llx\n", + "error clearing %llx badblocks at %llx\n", num_bad, first_bad); return rc; } @@ -539,7 +529,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) if (!nd_pfn->uuid) { /* - * When probing a namepace via nd_pfn_probe() the uuid + * When probing a namespace via nd_pfn_probe() the uuid * is NULL (see: nd_pfn_devinit()) we init settings from * pfn_sb */ @@ -638,10 +628,10 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns) return -ENODEV; } - nvdimm_bus_lock(&ndns->dev); - nd_pfn = nd_pfn_alloc(nd_region); - pfn_dev = nd_pfn_devinit(nd_pfn, ndns); - nvdimm_bus_unlock(&ndns->dev); + scoped_guard(nvdimm_bus, &ndns->dev) { + nd_pfn = nd_pfn_alloc(nd_region); + pfn_dev = nd_pfn_devinit(nd_pfn, ndns); + } if (!pfn_dev) return -ENOMEM; pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL); diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 598fe2e89bda..05785ff21a8b 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -20,7 +20,6 @@ #include <linux/kstrtox.h> #include <linux/vmalloc.h> #include <linux/blk-mq.h> -#include <linux/pfn_t.h> #include <linux/slab.h> #include <linux/uio.h> #include <linux/dax.h> @@ -242,19 +241,19 @@ static void pmem_submit_bio(struct bio *bio) /* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */ __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff, long nr_pages, enum dax_access_mode mode, void **kaddr, - pfn_t *pfn) + unsigned long *pfn) { resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset; sector_t sector = PFN_PHYS(pgoff) >> SECTOR_SHIFT; unsigned int num = PFN_PHYS(nr_pages) >> SECTOR_SHIFT; struct badblocks *bb = &pmem->bb; sector_t first_bad; - int num_bad; + sector_t num_bad; if (kaddr) *kaddr = pmem->virt_addr + offset; if (pfn) - *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags); + *pfn = PHYS_PFN(pmem->phys_addr + offset); if (bb->count && badblocks_check(bb, sector, num, &first_bad, &num_bad)) { @@ -303,7 +302,7 @@ static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, static long pmem_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, enum dax_access_mode mode, - void **kaddr, pfn_t *pfn) + void **kaddr, unsigned long *pfn) { struct pmem_device *pmem = dax_get_private(dax_dev); @@ -316,7 +315,7 @@ static long pmem_dax_direct_access(struct dax_device *dax_dev, * range, filesystem turns the normal pwrite to a dax_recovery_write. 
* * The recovery write consists of clearing media poison, clearing page - * HWPoison bit, reenable page-wide read-write permission, flush the + * HWPoison bit, re-enable page-wide read-write permission, flush the * caches and finally write. A competing pread thread will be held * off during the recovery process since data read back might not be * valid, and this is achieved by clearing the badblock records after @@ -455,6 +454,8 @@ static int pmem_attach_disk(struct device *dev, .logical_block_size = pmem_sector_size(ndns), .physical_block_size = PAGE_SIZE, .max_hw_sectors = UINT_MAX, + .features = BLK_FEAT_WRITE_CACHE | + BLK_FEAT_SYNCHRONOUS, }; int nid = dev_to_node(dev), fua; struct resource *res = &nsio->res; @@ -463,7 +464,6 @@ static int pmem_attach_disk(struct device *dev, struct dax_device *dax_dev; struct nd_pfn_sb *pfn_sb; struct pmem_device *pmem; - struct request_queue *q; struct gendisk *disk; void *addr; int rc; @@ -495,6 +495,10 @@ static int pmem_attach_disk(struct device *dev, dev_warn(dev, "unable to guarantee persistence of writes\n"); fua = 0; } + if (fua) + lim.features |= BLK_FEAT_FUA; + if (is_nd_pfn(dev) || pmem_should_map_pages(dev)) + lim.features |= BLK_FEAT_DAX; if (!devm_request_mem_region(dev, res->start, resource_size(res), dev_name(&ndns->dev))) { @@ -505,11 +509,9 @@ static int pmem_attach_disk(struct device *dev, disk = blk_alloc_disk(&lim, nid); if (IS_ERR(disk)) return PTR_ERR(disk); - q = disk->queue; pmem->disk = disk; pmem->pgmap.owner = pmem; - pmem->pfn_flags = PFN_DEV; if (is_nd_pfn(dev)) { pmem->pgmap.type = MEMORY_DEVICE_FS_DAX; pmem->pgmap.ops = &fsdax_pagemap_ops; @@ -518,7 +520,6 @@ static int pmem_attach_disk(struct device *dev, pmem->data_offset = le64_to_cpu(pfn_sb->dataoff); pmem->pfn_pad = resource_size(res) - range_len(&pmem->pgmap.range); - pmem->pfn_flags |= PFN_MAP; bb_range = pmem->pgmap.range; bb_range.start += pmem->data_offset; } else if (pmem_should_map_pages(dev)) { @@ -528,7 +529,6 @@ static int pmem_attach_disk(struct device *dev, pmem->pgmap.type = MEMORY_DEVICE_FS_DAX; pmem->pgmap.ops = &fsdax_pagemap_ops; addr = devm_memremap_pages(dev, &pmem->pgmap); - pmem->pfn_flags |= PFN_MAP; bb_range = pmem->pgmap.range; } else { addr = devm_memremap(dev, pmem->phys_addr, @@ -543,12 +543,6 @@ static int pmem_attach_disk(struct device *dev, } pmem->virt_addr = addr; - blk_queue_write_cache(q, true, fua); - blk_queue_flag_set(QUEUE_FLAG_NONROT, q); - blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, q); - if (pmem->pfn_flags & PFN_MAP) - blk_queue_flag_set(QUEUE_FLAG_DAX, q); - disk->fops = &pmem_fops; disk->private_data = pmem; nvdimm_namespace_disk_name(ndns, disk->disk_name); @@ -768,4 +762,5 @@ static struct nd_device_driver nd_pmem_driver = { module_nd_driver(nd_pmem_driver); MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>"); +MODULE_DESCRIPTION("NVDIMM Persistent Memory Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvdimm/pmem.h b/drivers/nvdimm/pmem.h index 392b0b38acb9..a48509f90196 100644 --- a/drivers/nvdimm/pmem.h +++ b/drivers/nvdimm/pmem.h @@ -5,7 +5,6 @@ #include <linux/badblocks.h> #include <linux/memremap.h> #include <linux/types.h> -#include <linux/pfn_t.h> #include <linux/fs.h> enum dax_access_mode; @@ -16,7 +15,6 @@ struct pmem_device { phys_addr_t phys_addr; /* when non-zero this device is hosting a 'pfn' instance */ phys_addr_t data_offset; - u64 pfn_flags; void *virt_addr; /* immutable base size of the namespace */ size_t size; @@ -31,7 +29,7 @@ struct pmem_device { long __pmem_direct_access(struct 
pmem_device *pmem, pgoff_t pgoff, long nr_pages, enum dax_access_mode mode, void **kaddr, - pfn_t *pfn); + unsigned long *pfn); #ifdef CONFIG_MEMORY_FAILURE static inline bool test_and_clear_pmem_poison(struct page *page) diff --git a/drivers/nvdimm/ramdax.c b/drivers/nvdimm/ramdax.c new file mode 100644 index 000000000000..954cb7919807 --- /dev/null +++ b/drivers/nvdimm/ramdax.c @@ -0,0 +1,282 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2025, Mike Rapoport, Microsoft + * + * Based on e820 pmem driver: + * Copyright (c) 2015, Christoph Hellwig. + * Copyright (c) 2015, Intel Corporation. + */ +#include <linux/platform_device.h> +#include <linux/memory_hotplug.h> +#include <linux/libnvdimm.h> +#include <linux/module.h> +#include <linux/numa.h> +#include <linux/slab.h> +#include <linux/io.h> +#include <linux/of.h> + +#include <uapi/linux/ndctl.h> + +#define LABEL_AREA_SIZE SZ_128K + +struct ramdax_dimm { + struct nvdimm *nvdimm; + void *label_area; +}; + +static void ramdax_remove(struct platform_device *pdev) +{ + struct nvdimm_bus *nvdimm_bus = platform_get_drvdata(pdev); + + nvdimm_bus_unregister(nvdimm_bus); +} + +static int ramdax_register_region(struct resource *res, + struct nvdimm *nvdimm, + struct nvdimm_bus *nvdimm_bus) +{ + struct nd_mapping_desc mapping; + struct nd_region_desc ndr_desc; + struct nd_interleave_set *nd_set; + int nid = phys_to_target_node(res->start); + + nd_set = kzalloc(sizeof(*nd_set), GFP_KERNEL); + if (!nd_set) + return -ENOMEM; + + nd_set->cookie1 = 0xcafebeefcafebeef; + nd_set->cookie2 = nd_set->cookie1; + nd_set->altcookie = nd_set->cookie1; + + memset(&mapping, 0, sizeof(mapping)); + mapping.nvdimm = nvdimm; + mapping.start = 0; + mapping.size = resource_size(res) - LABEL_AREA_SIZE; + + memset(&ndr_desc, 0, sizeof(ndr_desc)); + ndr_desc.res = res; + ndr_desc.numa_node = numa_map_to_online_node(nid); + ndr_desc.target_node = nid; + ndr_desc.num_mappings = 1; + ndr_desc.mapping = &mapping; + ndr_desc.nd_set = nd_set; + + if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc)) + goto err_free_nd_set; + + return 0; + +err_free_nd_set: + kfree(nd_set); + return -ENXIO; +} + +static int ramdax_register_dimm(struct resource *res, void *data) +{ + resource_size_t start = res->start; + resource_size_t size = resource_size(res); + unsigned long flags = 0, cmd_mask = 0; + struct nvdimm_bus *nvdimm_bus = data; + struct ramdax_dimm *dimm; + int err; + + dimm = kzalloc(sizeof(*dimm), GFP_KERNEL); + if (!dimm) + return -ENOMEM; + + dimm->label_area = memremap(start + size - LABEL_AREA_SIZE, + LABEL_AREA_SIZE, MEMREMAP_WB); + if (!dimm->label_area) { + err = -ENOMEM; + goto err_free_dimm; + } + + set_bit(NDD_LABELING, &flags); + set_bit(NDD_REGISTER_SYNC, &flags); + set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask); + set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask); + set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask); + dimm->nvdimm = nvdimm_create(nvdimm_bus, dimm, + /* dimm_attribute_groups */ NULL, + flags, cmd_mask, 0, NULL); + if (!dimm->nvdimm) { + err = -ENOMEM; + goto err_unmap_label; + } + + err = ramdax_register_region(res, dimm->nvdimm, nvdimm_bus); + if (err) + goto err_remove_nvdimm; + + return 0; + +err_remove_nvdimm: + nvdimm_delete(dimm->nvdimm); +err_unmap_label: + memunmap(dimm->label_area); +err_free_dimm: + kfree(dimm); + return err; +} + +static int ramdax_get_config_size(struct nvdimm *nvdimm, int buf_len, + struct nd_cmd_get_config_size *cmd) +{ + if (sizeof(*cmd) > buf_len) + return -EINVAL; + + *cmd = (struct nd_cmd_get_config_size){ + .status 
= 0, + .config_size = LABEL_AREA_SIZE, + .max_xfer = 8, + }; + + return 0; +} + +static int ramdax_get_config_data(struct nvdimm *nvdimm, int buf_len, + struct nd_cmd_get_config_data_hdr *cmd) +{ + struct ramdax_dimm *dimm = nvdimm_provider_data(nvdimm); + + if (sizeof(*cmd) > buf_len) + return -EINVAL; + if (struct_size(cmd, out_buf, cmd->in_length) > buf_len) + return -EINVAL; + if (size_add(cmd->in_offset, cmd->in_length) > LABEL_AREA_SIZE) + return -EINVAL; + + memcpy(cmd->out_buf, dimm->label_area + cmd->in_offset, cmd->in_length); + + return 0; +} + +static int ramdax_set_config_data(struct nvdimm *nvdimm, int buf_len, + struct nd_cmd_set_config_hdr *cmd) +{ + struct ramdax_dimm *dimm = nvdimm_provider_data(nvdimm); + + if (sizeof(*cmd) > buf_len) + return -EINVAL; + if (struct_size(cmd, in_buf, cmd->in_length) > buf_len) + return -EINVAL; + if (size_add(cmd->in_offset, cmd->in_length) > LABEL_AREA_SIZE) + return -EINVAL; + + memcpy(dimm->label_area + cmd->in_offset, cmd->in_buf, cmd->in_length); + + return 0; +} + +static int ramdax_nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd, + void *buf, unsigned int buf_len) +{ + unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm); + + if (!test_bit(cmd, &cmd_mask)) + return -ENOTTY; + + switch (cmd) { + case ND_CMD_GET_CONFIG_SIZE: + return ramdax_get_config_size(nvdimm, buf_len, buf); + case ND_CMD_GET_CONFIG_DATA: + return ramdax_get_config_data(nvdimm, buf_len, buf); + case ND_CMD_SET_CONFIG_DATA: + return ramdax_set_config_data(nvdimm, buf_len, buf); + default: + return -ENOTTY; + } +} + +static int ramdax_ctl(struct nvdimm_bus_descriptor *nd_desc, + struct nvdimm *nvdimm, unsigned int cmd, void *buf, + unsigned int buf_len, int *cmd_rc) +{ + /* + * No firmware response to translate, let the transport error + * code take precedence. 
+ */ + *cmd_rc = 0; + + if (!nvdimm) + return -ENOTTY; + return ramdax_nvdimm_ctl(nvdimm, cmd, buf, buf_len); +} + +#ifdef CONFIG_OF +static const struct of_device_id ramdax_of_matches[] = { + { .compatible = "pmem-region", }, + { }, +}; +#endif + +static int ramdax_probe_of(struct platform_device *pdev, + struct nvdimm_bus *bus, struct device_node *np) +{ + int err; + + if (!of_match_node(ramdax_of_matches, np)) + return -ENODEV; + + for (int i = 0; i < pdev->num_resources; i++) { + err = ramdax_register_dimm(&pdev->resource[i], bus); + if (err) + goto err_unregister; + } + + return 0; + +err_unregister: + /* + * FIXME: should we unregister the dimms that were registered + * successfully + */ + return err; +} + +static int ramdax_probe(struct platform_device *pdev) +{ + static struct nvdimm_bus_descriptor nd_desc; + struct device *dev = &pdev->dev; + struct nvdimm_bus *nvdimm_bus; + struct device_node *np; + int rc = -ENXIO; + + nd_desc.provider_name = "ramdax"; + nd_desc.module = THIS_MODULE; + nd_desc.ndctl = ramdax_ctl; + nvdimm_bus = nvdimm_bus_register(dev, &nd_desc); + if (!nvdimm_bus) + goto err; + + np = dev_of_node(&pdev->dev); + if (np) + rc = ramdax_probe_of(pdev, nvdimm_bus, np); + else + rc = walk_iomem_res_desc(IORES_DESC_PERSISTENT_MEMORY_LEGACY, + IORESOURCE_MEM, 0, -1, nvdimm_bus, + ramdax_register_dimm); + if (rc) + goto err; + + platform_set_drvdata(pdev, nvdimm_bus); + + return 0; +err: + nvdimm_bus_unregister(nvdimm_bus); + return rc; +} + +static struct platform_driver ramdax_driver = { + .probe = ramdax_probe, + .remove = ramdax_remove, + .driver = { + .name = "ramdax", + }, +}; + +module_platform_driver(ramdax_driver); + +MODULE_DESCRIPTION("NVDIMM support for e820 type-12 memory and OF pmem-region"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Microsoft Corporation"); diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c index 88dc062af5f8..53567f3ed427 100644 --- a/drivers/nvdimm/region.c +++ b/drivers/nvdimm/region.c @@ -70,7 +70,7 @@ static int nd_region_probe(struct device *dev) * "<async-registered>/<total>" namespace count. */ dev_err(dev, "failed to register %d namespace%s, continuing...\n", - err, err == 1 ? "" : "s"); + err, str_plural(err)); return 0; } @@ -87,13 +87,13 @@ static void nd_region_remove(struct device *dev) device_for_each_child(dev, NULL, child_unregister); /* flush attribute readers and disable */ - nvdimm_bus_lock(dev); - nd_region->ns_seed = NULL; - nd_region->btt_seed = NULL; - nd_region->pfn_seed = NULL; - nd_region->dax_seed = NULL; - dev_set_drvdata(dev, NULL); - nvdimm_bus_unlock(dev); + scoped_guard(nvdimm_bus, dev) { + nd_region->ns_seed = NULL; + nd_region->btt_seed = NULL; + nd_region->pfn_seed = NULL; + nd_region->dax_seed = NULL; + dev_set_drvdata(dev, NULL); + } /* * Note, this assumes device_lock() context to not race @@ -110,7 +110,7 @@ static void nd_region_remove(struct device *dev) * here is ok. 
*/ if (cpu_cache_has_invalidate_memregion()) - cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY); + cpu_cache_invalidate_all(); } static int child_notify(struct device *dev, void *data) diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index 9e4f7ff024a0..1220530a23b6 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -90,7 +90,7 @@ static int nd_region_invalidate_memregion(struct nd_region *nd_region) } } - cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY); + cpu_cache_invalidate_all(); out: for (i = 0; i < nd_region->ndr_mappings; i++) { struct nd_mapping *nd_mapping = &nd_region->mapping[i]; @@ -102,31 +102,44 @@ out: return 0; } -int nd_region_activate(struct nd_region *nd_region) +static int get_flush_data(struct nd_region *nd_region, size_t *size, int *num_flush) { - int i, j, rc, num_flush = 0; - struct nd_region_data *ndrd; - struct device *dev = &nd_region->dev; size_t flush_data_size = sizeof(void *); + int _num_flush = 0; + int i; - nvdimm_bus_lock(&nd_region->dev); + guard(nvdimm_bus)(&nd_region->dev); for (i = 0; i < nd_region->ndr_mappings; i++) { struct nd_mapping *nd_mapping = &nd_region->mapping[i]; struct nvdimm *nvdimm = nd_mapping->nvdimm; - if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) { - nvdimm_bus_unlock(&nd_region->dev); + if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) return -EBUSY; - } /* at least one null hint slot per-dimm for the "no-hint" case */ flush_data_size += sizeof(void *); - num_flush = min_not_zero(num_flush, nvdimm->num_flush); + _num_flush = min_not_zero(_num_flush, nvdimm->num_flush); if (!nvdimm->num_flush) continue; flush_data_size += nvdimm->num_flush * sizeof(void *); } - nvdimm_bus_unlock(&nd_region->dev); + + *size = flush_data_size; + *num_flush = _num_flush; + + return 0; +} + +int nd_region_activate(struct nd_region *nd_region) +{ + int i, j, rc, num_flush; + struct nd_region_data *ndrd; + struct device *dev = &nd_region->dev; + size_t flush_data_size; + + rc = get_flush_data(nd_region, &flush_data_size, &num_flush); + if (rc) + return rc; rc = nd_region_invalidate_memregion(nd_region); if (rc) @@ -327,8 +340,8 @@ static ssize_t set_cookie_show(struct device *dev, * the v1.1 namespace label cookie definition. To read all this * data we need to wait for probing to settle. */ - device_lock(dev); - nvdimm_bus_lock(dev); + guard(device)(dev); + guard(nvdimm_bus)(dev); wait_nvdimm_bus_probe_idle(dev); if (nd_region->ndr_mappings) { struct nd_mapping *nd_mapping = &nd_region->mapping[0]; @@ -343,8 +356,6 @@ static ssize_t set_cookie_show(struct device *dev, nsindex)); } } - nvdimm_bus_unlock(dev); - device_unlock(dev); if (rc) return rc; @@ -393,7 +404,6 @@ static ssize_t available_size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nd_region *nd_region = to_nd_region(dev); - unsigned long long available = 0; /* * Flush in-flight updates and grab a snapshot of the available @@ -401,14 +411,11 @@ static ssize_t available_size_show(struct device *dev, * memory nvdimm_bus_lock() is dropped, but that's userspace's * problem to not race itself. 
*/ - device_lock(dev); - nvdimm_bus_lock(dev); + guard(device)(dev); + guard(nvdimm_bus)(dev); wait_nvdimm_bus_probe_idle(dev); - available = nd_region_available_dpa(nd_region); - nvdimm_bus_unlock(dev); - device_unlock(dev); - return sprintf(buf, "%llu\n", available); + return sprintf(buf, "%llu\n", nd_region_available_dpa(nd_region)); } static DEVICE_ATTR_RO(available_size); @@ -416,16 +423,12 @@ static ssize_t max_available_extent_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nd_region *nd_region = to_nd_region(dev); - unsigned long long available = 0; - device_lock(dev); - nvdimm_bus_lock(dev); + guard(device)(dev); + guard(nvdimm_bus)(dev); wait_nvdimm_bus_probe_idle(dev); - available = nd_region_allocatable_dpa(nd_region); - nvdimm_bus_unlock(dev); - device_unlock(dev); - return sprintf(buf, "%llu\n", available); + return sprintf(buf, "%llu\n", nd_region_allocatable_dpa(nd_region)); } static DEVICE_ATTR_RO(max_available_extent); @@ -433,16 +436,12 @@ static ssize_t init_namespaces_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nd_region_data *ndrd = dev_get_drvdata(dev); - ssize_t rc; - nvdimm_bus_lock(dev); - if (ndrd) - rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count); - else - rc = -ENXIO; - nvdimm_bus_unlock(dev); + guard(nvdimm_bus)(dev); + if (!ndrd) + return -ENXIO; - return rc; + return sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count); } static DEVICE_ATTR_RO(init_namespaces); @@ -450,15 +449,12 @@ static ssize_t namespace_seed_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nd_region *nd_region = to_nd_region(dev); - ssize_t rc; - nvdimm_bus_lock(dev); + guard(nvdimm_bus)(dev); if (nd_region->ns_seed) - rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed)); - else - rc = sprintf(buf, "\n"); - nvdimm_bus_unlock(dev); - return rc; + return sprintf(buf, "%s\n", dev_name(nd_region->ns_seed)); + + return sprintf(buf, "\n"); } static DEVICE_ATTR_RO(namespace_seed); @@ -466,16 +462,12 @@ static ssize_t btt_seed_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nd_region *nd_region = to_nd_region(dev); - ssize_t rc; - nvdimm_bus_lock(dev); + guard(nvdimm_bus)(dev); if (nd_region->btt_seed) - rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed)); - else - rc = sprintf(buf, "\n"); - nvdimm_bus_unlock(dev); + return sprintf(buf, "%s\n", dev_name(nd_region->btt_seed)); - return rc; + return sprintf(buf, "\n"); } static DEVICE_ATTR_RO(btt_seed); @@ -483,16 +475,12 @@ static ssize_t pfn_seed_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nd_region *nd_region = to_nd_region(dev); - ssize_t rc; - nvdimm_bus_lock(dev); + guard(nvdimm_bus)(dev); if (nd_region->pfn_seed) - rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed)); - else - rc = sprintf(buf, "\n"); - nvdimm_bus_unlock(dev); + return sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed)); - return rc; + return sprintf(buf, "\n"); } static DEVICE_ATTR_RO(pfn_seed); @@ -500,16 +488,12 @@ static ssize_t dax_seed_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nd_region *nd_region = to_nd_region(dev); - ssize_t rc; - nvdimm_bus_lock(dev); + guard(nvdimm_bus)(dev); if (nd_region->dax_seed) - rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed)); - else - rc = sprintf(buf, "\n"); - nvdimm_bus_unlock(dev); + return sprintf(buf, "%s\n", dev_name(nd_region->dax_seed)); - return rc; + return sprintf(buf, "\n"); } static DEVICE_ATTR_RO(dax_seed); 
@@ -581,9 +565,8 @@ static ssize_t align_store(struct device *dev, * times ensure it does not change for the duration of the * allocation. */ - nvdimm_bus_lock(dev); + guard(nvdimm_bus)(dev); nd_region->align = val; - nvdimm_bus_unlock(dev); return len; } @@ -890,7 +873,7 @@ void nd_mapping_free_labels(struct nd_mapping *nd_mapping) */ void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev) { - nvdimm_bus_lock(dev); + guard(nvdimm_bus)(dev); if (nd_region->ns_seed == dev) { nd_region_create_ns_seed(nd_region); } else if (is_nd_btt(dev)) { @@ -915,7 +898,6 @@ void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev) if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev) nd_region_create_ns_seed(nd_region); } - nvdimm_bus_unlock(dev); } /** @@ -1229,45 +1211,4 @@ bool is_nvdimm_sync(struct nd_region *nd_region) } EXPORT_SYMBOL_GPL(is_nvdimm_sync); -struct conflict_context { - struct nd_region *nd_region; - resource_size_t start, size; -}; - -static int region_conflict(struct device *dev, void *data) -{ - struct nd_region *nd_region; - struct conflict_context *ctx = data; - resource_size_t res_end, region_end, region_start; - - if (!is_memory(dev)) - return 0; - - nd_region = to_nd_region(dev); - if (nd_region == ctx->nd_region) - return 0; - - res_end = ctx->start + ctx->size; - region_start = nd_region->ndr_start; - region_end = region_start + nd_region->ndr_size; - if (ctx->start >= region_start && ctx->start < region_end) - return -EBUSY; - if (res_end > region_start && res_end <= region_end) - return -EBUSY; - return 0; -} - -int nd_region_conflict(struct nd_region *nd_region, resource_size_t start, - resource_size_t size) -{ - struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev); - struct conflict_context ctx = { - .nd_region = nd_region, - .start = start, - .size = size, - }; - - return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict); -} - -MODULE_IMPORT_NS(DEVMEM); +MODULE_IMPORT_NS("DEVMEM"); diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c index a03e3c45f297..e41f6951ca0f 100644 --- a/drivers/nvdimm/security.c +++ b/drivers/nvdimm/security.c @@ -219,12 +219,9 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm) int nvdimm_security_unlock(struct device *dev) { struct nvdimm *nvdimm = to_nvdimm(dev); - int rc; - nvdimm_bus_lock(dev); - rc = __nvdimm_security_unlock(nvdimm); - nvdimm_bus_unlock(dev); - return rc; + guard(nvdimm_bus)(dev); + return __nvdimm_security_unlock(nvdimm); } static int check_security_state(struct nvdimm *nvdimm) @@ -427,7 +424,7 @@ static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid) * query. 
*/ get_device(dev); - queue_delayed_work(system_wq, &nvdimm->dwork, 0); + queue_delayed_work(system_percpu_wq, &nvdimm->dwork, 0); } return rc; @@ -460,7 +457,7 @@ static void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm) /* setup delayed work again */ tmo += 10; - queue_delayed_work(system_wq, &nvdimm->dwork, tmo * HZ); + queue_delayed_work(system_percpu_wq, &nvdimm->dwork, tmo * HZ); nvdimm->sec.overwrite_tmo = min(15U * 60U, tmo); return; } @@ -490,9 +487,8 @@ void nvdimm_security_overwrite_query(struct work_struct *work) struct nvdimm *nvdimm = container_of(work, typeof(*nvdimm), dwork.work); - nvdimm_bus_lock(&nvdimm->dev); + guard(nvdimm_bus)(&nvdimm->dev); __nvdimm_security_overwrite_query(nvdimm); - nvdimm_bus_unlock(&nvdimm->dev); } #define OPS \ diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c index c9b97aeabf85..2396d19ce549 100644 --- a/drivers/nvdimm/virtio_pmem.c +++ b/drivers/nvdimm/virtio_pmem.c @@ -143,6 +143,28 @@ static void virtio_pmem_remove(struct virtio_device *vdev) virtio_reset_device(vdev); } +static int virtio_pmem_freeze(struct virtio_device *vdev) +{ + vdev->config->del_vqs(vdev); + virtio_reset_device(vdev); + + return 0; +} + +static int virtio_pmem_restore(struct virtio_device *vdev) +{ + int ret; + + ret = init_vq(vdev->priv); + if (ret) { + dev_err(&vdev->dev, "failed to initialize virtio pmem's vq\n"); + return ret; + } + virtio_device_ready(vdev); + + return 0; +} + static unsigned int features[] = { VIRTIO_PMEM_F_SHMEM_REGION, }; @@ -155,6 +177,8 @@ static struct virtio_driver virtio_pmem_driver = { .validate = virtio_pmem_validate, .probe = virtio_pmem_probe, .remove = virtio_pmem_remove, + .freeze = virtio_pmem_freeze, + .restore = virtio_pmem_restore, }; module_virtio_driver(virtio_pmem_driver); |
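
Note on the locking conversions above: the nvdimm_bus_lock()/nvdimm_bus_unlock() pairs are replaced with guard(nvdimm_bus) and scoped_guard(nvdimm_bus, ...) from the kernel's scope-based cleanup helpers (linux/cleanup.h). The declaration of the guard class itself is not part of the hunks shown here; the sketch below is only an illustration of how such a guard is typically declared with DEFINE_GUARD() on top of the existing lock helpers, and of why the per-return-path unlock calls can be dropped. The example_show() function and its location are hypothetical.

	/*
	 * Sketch only: a scope-based guard for the nvdimm bus lock, built on
	 * the generic cleanup helpers. The real series may declare this in
	 * nd-core.h or another internal header; nvdimm_bus_lock()/unlock()
	 * are the existing nvdimm helpers taking a struct device pointer.
	 */
	#include <linux/cleanup.h>
	#include <linux/device.h>

	DEFINE_GUARD(nvdimm_bus, struct device *,
		     nvdimm_bus_lock(_T), nvdimm_bus_unlock(_T))

	/*
	 * With the guard declared, a sysfs show routine that previously had
	 * to pair lock/unlock calls around every return path can take the
	 * lock for the remainder of the scope and return directly:
	 */
	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		guard(nvdimm_bus)(dev);	/* dropped automatically on return */

		return sprintf(buf, "example\n");
	}

Where only part of a function needs the lock, scoped_guard(nvdimm_bus, dev) { ... } limits the hold time to the braced block, which is the pattern used in nd_region_remove() above.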
