author     Linus Torvalds <torvalds@linux-foundation.org>  2016-03-18 17:13:31 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-03-18 17:13:31 -0700
commit     237045fc3c67d44088f767dca5a9fa30815eba62 (patch)
tree       50773de6320e4b3358e3935c2f3c6f93d0dc93f3 /drivers/nvme
parent     35d88d97bee90fc09286d28209a64a991291a64a (diff)
parent     118472ab8532e55f48395ef5764b354fe48b1d73 (diff)
Merge branch 'for-4.6/drivers' of git://git.kernel.dk/linux-block
Pull block driver updates from Jens Axboe:
 "This is the block driver pull request for this merge window. It sits
  on top of for-4.6/core, that was just sent out. This contains:

   - A set of fixes for lightnvm. One from Alan, fixing an overflow,
     and the rest from the usual suspects, Javier and Matias.

   - A set of fixes for nbd from Markus and Dan, and a fixup from Arnd
     for correct usage of the signed 64-bit divider.

   - A set of bug fixes for the Micron mtip32xx, from Asai.

   - A fix for the brd discard handling from Bart.

   - Update the maintainers entry for cciss, since that hardware has
     transferred ownership.

   - Three bug fixes for bcache from Eric Wheeler.

   - Set of fixes for xen-blk{back,front} from Jan and Konrad.

   - Removal of the cpqarray driver. It has been disabled in Kconfig
     since 2013, and we were initially scheduled to remove it in 3.15.

   - Various updates and fixes for NVMe, with the most important being:

       - Removal of the per-device NVMe thread, replacing that with a
         watchdog timer instead. From Christoph.

       - Exposing the namespace WWID through sysfs, from Keith.

       - Set of cleanups from Ming Lin.

       - Logging the controller device name instead of the underlying
         PCI device name, from Sagi.

       - And a bunch of fixes and optimizations from the usual suspects
         in this area"

* 'for-4.6/drivers' of git://git.kernel.dk/linux-block: (49 commits)
  NVMe: Expose ns wwid through single sysfs entry
  drivers:block: cpqarray clean up
  brd: Fix discard request processing
  cpqarray: remove it from the kernel
  cciss: update MAINTAINERS
  NVMe: Remove unused sq_head read in completion path
  bcache: fix cache_set_flush() NULL pointer dereference on OOM
  bcache: cleaned up error handling around register_cache()
  bcache: fix race of writeback thread starting before complete initialization
  NVMe: Create discard zero quirk white list
  nbd: use correct div_s64 helper
  mtip32xx: remove unneeded variable in mtip_cmd_timeout()
  lightnvm: generalize rrpc ppa calculations
  lightnvm: remove struct nvm_dev->total_blocks
  lightnvm: rename ->nr_pages to ->nr_sects
  lightnvm: update closed list outside of intr context
  xen/blback: Fit the important information of the thread in 17 characters
  lightnvm: fold get bb tbl when using dual/quad plane mode
  lightnvm: fix up nonsensical configure overrun checking
  xen-blkback: advertise indirect segment support earlier
  ...
Diffstat (limited to 'drivers/nvme')
-rw-r--r--  drivers/nvme/host/Kconfig    |   6
-rw-r--r--  drivers/nvme/host/Makefile   |  10
-rw-r--r--  drivers/nvme/host/core.c     | 166
-rw-r--r--  drivers/nvme/host/lightnvm.c |  46
-rw-r--r--  drivers/nvme/host/nvme.h     |  14
-rw-r--r--  drivers/nvme/host/pci.c      | 235
6 files changed, 275 insertions, 202 deletions
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index b586d84f2518..c894841c6456 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -1,6 +1,10 @@
+config NVME_CORE
+ tristate
+
config BLK_DEV_NVME
tristate "NVM Express block device"
depends on PCI && BLOCK
+ select NVME_CORE
---help---
The NVM Express driver is for solid state drives directly
connected to the PCI or PCI Express bus. If you know you
@@ -11,7 +15,7 @@ config BLK_DEV_NVME
config BLK_DEV_NVME_SCSI
bool "SCSI emulation for NVMe device nodes"
- depends on BLK_DEV_NVME
+ depends on NVME_CORE
---help---
This adds support for the SG_IO ioctl on the NVMe character
and block device nodes, as well as a translation for a small
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index 51bf90871549..9a3ca892b4a7 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -1,6 +1,8 @@
-obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
+obj-$(CONFIG_NVME_CORE) += nvme-core.o
+obj-$(CONFIG_BLK_DEV_NVME) += nvme.o

-lightnvm-$(CONFIG_NVM) := lightnvm.o
-nvme-y += core.o pci.o $(lightnvm-y)
-nvme-$(CONFIG_BLK_DEV_NVME_SCSI) += scsi.o
+nvme-core-y := core.o
+nvme-core-$(CONFIG_BLK_DEV_NVME_SCSI) += scsi.o
+nvme-core-$(CONFIG_NVM) += lightnvm.o
+
+nvme-y += pci.o
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 03c46412fff4..643f457131c2 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -33,6 +33,20 @@
#define NVME_MINORS (1U << MINORBITS)
+unsigned char admin_timeout = 60;
+module_param(admin_timeout, byte, 0644);
+MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
+EXPORT_SYMBOL_GPL(admin_timeout);
+
+unsigned char nvme_io_timeout = 30;
+module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
+MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
+EXPORT_SYMBOL_GPL(nvme_io_timeout);
+
+unsigned char shutdown_timeout = 5;
+module_param(shutdown_timeout, byte, 0644);
+MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
+
static int nvme_major;
module_param(nvme_major, int, 0);
@@ -40,7 +54,7 @@ static int nvme_char_major;
module_param(nvme_char_major, int, 0);
static LIST_HEAD(nvme_ctrl_list);
-DEFINE_SPINLOCK(dev_list_lock);
+static DEFINE_SPINLOCK(dev_list_lock);
static struct class *nvme_class;
@@ -72,11 +86,21 @@ static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk)
spin_lock(&dev_list_lock);
ns = disk->private_data;
- if (ns && !kref_get_unless_zero(&ns->kref))
- ns = NULL;
+ if (ns) {
+ if (!kref_get_unless_zero(&ns->kref))
+ goto fail;
+ if (!try_module_get(ns->ctrl->ops->module))
+ goto fail_put_ns;
+ }
spin_unlock(&dev_list_lock);
return ns;
+
+fail_put_ns:
+ kref_put(&ns->kref, nvme_free_ns);
+fail:
+ spin_unlock(&dev_list_lock);
+ return NULL;
}
void nvme_requeue_req(struct request *req)
@@ -89,6 +113,7 @@ void nvme_requeue_req(struct request *req)
blk_mq_kick_requeue_list(req->q);
spin_unlock_irqrestore(req->q->queue_lock, flags);
}
+EXPORT_SYMBOL_GPL(nvme_requeue_req);
struct request *nvme_alloc_request(struct request_queue *q,
struct nvme_command *cmd, unsigned int flags)
@@ -108,17 +133,18 @@ struct request *nvme_alloc_request(struct request_queue *q,
req->cmd = (unsigned char *)cmd;
req->cmd_len = sizeof(struct nvme_command);
- req->special = (void *)0;
return req;
}
+EXPORT_SYMBOL_GPL(nvme_alloc_request);
/*
* Returns 0 on success. If the result is negative, it's a Linux error code;
* if the result is positive, it's an NVM Express status code
*/
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
- void *buffer, unsigned bufflen, u32 *result, unsigned timeout)
+ struct nvme_completion *cqe, void *buffer, unsigned bufflen,
+ unsigned timeout)
{
struct request *req;
int ret;
@@ -128,6 +154,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
return PTR_ERR(req);
req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
+ req->special = cqe;
if (buffer && bufflen) {
ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
@@ -136,8 +163,6 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
}
blk_execute_rq(req->q, NULL, req, 0);
- if (result)
- *result = (u32)(uintptr_t)req->special;
ret = req->errors;
out:
blk_mq_free_request(req);
@@ -147,8 +172,9 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buffer, unsigned bufflen)
{
- return __nvme_submit_sync_cmd(q, cmd, buffer, bufflen, NULL, 0);
+ return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0);
}
+EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
void __user *ubuffer, unsigned bufflen,
@@ -156,6 +182,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
u32 *result, unsigned timeout)
{
bool write = cmd->common.opcode & 1;
+ struct nvme_completion cqe;
struct nvme_ns *ns = q->queuedata;
struct gendisk *disk = ns ? ns->disk : NULL;
struct request *req;
@@ -168,6 +195,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
return PTR_ERR(req);
req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
+ req->special = &cqe;
if (ubuffer && bufflen) {
ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
@@ -222,7 +250,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
blk_execute_rq(req->q, disk, req, 0);
ret = req->errors;
if (result)
- *result = (u32)(uintptr_t)req->special;
+ *result = le32_to_cpu(cqe.result);
if (meta && !ret && !write) {
if (copy_to_user(meta_buffer, meta, meta_len))
ret = -EFAULT;
@@ -303,6 +331,8 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
dma_addr_t dma_addr, u32 *result)
{
struct nvme_command c;
+ struct nvme_completion cqe;
+ int ret;
memset(&c, 0, sizeof(c));
c.features.opcode = nvme_admin_get_features;
@@ -310,13 +340,18 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
c.features.prp1 = cpu_to_le64(dma_addr);
c.features.fid = cpu_to_le32(fid);
- return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0, result, 0);
+ ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0);
+ if (ret >= 0)
+ *result = le32_to_cpu(cqe.result);
+ return ret;
}
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
dma_addr_t dma_addr, u32 *result)
{
struct nvme_command c;
+ struct nvme_completion cqe;
+ int ret;
memset(&c, 0, sizeof(c));
c.features.opcode = nvme_admin_set_features;
@@ -324,7 +359,10 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
c.features.fid = cpu_to_le32(fid);
c.features.dword11 = cpu_to_le32(dword11);
- return __nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0, result, 0);
+ ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0);
+ if (ret >= 0)
+ *result = le32_to_cpu(cqe.result);
+ return ret;
}
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log)
@@ -364,6 +402,7 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
*count = min(*count, nr_io_queues);
return 0;
}
+EXPORT_SYMBOL_GPL(nvme_set_queue_count);
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
@@ -504,7 +543,10 @@ static int nvme_open(struct block_device *bdev, fmode_t mode)
static void nvme_release(struct gendisk *disk, fmode_t mode)
{
- nvme_put_ns(disk->private_data);
+ struct nvme_ns *ns = disk->private_data;
+
+ module_put(ns->ctrl->ops->module);
+ nvme_put_ns(ns);
}
static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -545,8 +587,14 @@ static void nvme_init_integrity(struct nvme_ns *ns)
static void nvme_config_discard(struct nvme_ns *ns)
{
+ struct nvme_ctrl *ctrl = ns->ctrl;
u32 logical_block_size = queue_logical_block_size(ns->queue);
- ns->queue->limits.discard_zeroes_data = 0;
+
+ if (ctrl->quirks & NVME_QUIRK_DISCARD_ZEROES)
+ ns->queue->limits.discard_zeroes_data = 1;
+ else
+ ns->queue->limits.discard_zeroes_data = 0;
+
ns->queue->limits.discard_alignment = logical_block_size;
ns->queue->limits.discard_granularity = logical_block_size;
blk_queue_max_discard_sectors(ns->queue, 0xffffffff);
@@ -566,8 +614,8 @@ static int nvme_revalidate_disk(struct gendisk *disk)
return -ENODEV;
}
if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) {
- dev_warn(ns->ctrl->dev, "%s: Identify failure nvme%dn%d\n",
- __func__, ns->ctrl->instance, ns->ns_id);
+ dev_warn(disk_to_dev(ns->disk), "%s: Identify failure\n",
+ __func__);
return -ENODEV;
}
if (id->ncap == 0) {
@@ -577,7 +625,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
if (nvme_nvm_ns_supported(ns, id) && ns->type != NVME_NS_LIGHTNVM) {
if (nvme_nvm_register(ns->queue, disk->disk_name)) {
- dev_warn(ns->ctrl->dev,
+ dev_warn(disk_to_dev(ns->disk),
"%s: LightNVM init failure\n", __func__);
kfree(id);
return -ENODEV;
@@ -750,7 +798,7 @@ static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
if (fatal_signal_pending(current))
return -EINTR;
if (time_after(jiffies, timeout)) {
- dev_err(ctrl->dev,
+ dev_err(ctrl->device,
"Device not ready; aborting %s\n", enabled ?
"initialisation" : "reset");
return -ENODEV;
@@ -778,6 +826,7 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
return ret;
return nvme_wait_ready(ctrl, cap, false);
}
+EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
{
@@ -790,7 +839,7 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
int ret;
if (page_shift < dev_page_min) {
- dev_err(ctrl->dev,
+ dev_err(ctrl->device,
"Minimum device page size %u too large for host (%u)\n",
1 << dev_page_min, 1 << page_shift);
return -ENODEV;
@@ -809,6 +858,7 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
return ret;
return nvme_wait_ready(ctrl, cap, true);
}
+EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
{
@@ -831,7 +881,7 @@ int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
if (fatal_signal_pending(current))
return -EINTR;
if (time_after(jiffies, timeout)) {
- dev_err(ctrl->dev,
+ dev_err(ctrl->device,
"Device shutdown incomplete; abort shutdown\n");
return -ENODEV;
}
@@ -839,6 +889,7 @@ int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
return ret;
}
+EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
struct request_queue *q)
@@ -870,13 +921,13 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
if (ret) {
- dev_err(ctrl->dev, "Reading VS failed (%d)\n", ret);
+ dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
return ret;
}
ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
if (ret) {
- dev_err(ctrl->dev, "Reading CAP failed (%d)\n", ret);
+ dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
return ret;
}
page_shift = NVME_CAP_MPSMIN(cap) + 12;
@@ -886,13 +937,15 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
ret = nvme_identify_ctrl(ctrl, &id);
if (ret) {
- dev_err(ctrl->dev, "Identify Controller failed (%d)\n", ret);
+ dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
return -EIO;
}
+ ctrl->vid = le16_to_cpu(id->vid);
ctrl->oncs = le16_to_cpup(&id->oncs);
atomic_set(&ctrl->abort_limit, id->acl + 1);
ctrl->vwc = id->vwc;
+ ctrl->cntlid = le16_to_cpup(&id->cntlid);
memcpy(ctrl->serial, id->sn, sizeof(id->sn));
memcpy(ctrl->model, id->mn, sizeof(id->mn));
memcpy(ctrl->firmware_rev, id->fr, sizeof(id->fr));
@@ -919,6 +972,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
kfree(id);
return 0;
}
+EXPORT_SYMBOL_GPL(nvme_init_identify);
static int nvme_dev_open(struct inode *inode, struct file *file)
{
@@ -965,13 +1019,13 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
- dev_warn(ctrl->dev,
+ dev_warn(ctrl->device,
"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
ret = -EINVAL;
goto out_unlock;
}
- dev_warn(ctrl->dev,
+ dev_warn(ctrl->device,
"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
kref_get(&ns->kref);
mutex_unlock(&ctrl->namespaces_mutex);
@@ -997,7 +1051,7 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
case NVME_IOCTL_IO_CMD:
return nvme_dev_user_cmd(ctrl, argp);
case NVME_IOCTL_RESET:
- dev_warn(ctrl->dev, "resetting controller\n");
+ dev_warn(ctrl->device, "resetting controller\n");
return ctrl->ops->reset_ctrl(ctrl);
case NVME_IOCTL_SUBSYS_RESET:
return nvme_reset_subsystem(ctrl);
@@ -1028,6 +1082,30 @@ static ssize_t nvme_sysfs_reset(struct device *dev,
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
+static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_ns *ns = dev_to_disk(dev)->private_data;
+ struct nvme_ctrl *ctrl = ns->ctrl;
+ int serial_len = sizeof(ctrl->serial);
+ int model_len = sizeof(ctrl->model);
+
+ if (memchr_inv(ns->uuid, 0, sizeof(ns->uuid)))
+ return sprintf(buf, "eui.%16phN\n", ns->uuid);
+
+ if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
+ return sprintf(buf, "eui.%8phN\n", ns->eui);
+
+ while (ctrl->serial[serial_len - 1] == ' ')
+ serial_len--;
+ while (ctrl->model[model_len - 1] == ' ')
+ model_len--;
+
+ return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
+ serial_len, ctrl->serial, model_len, ctrl->model, ns->ns_id);
+}
+static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL);
+
static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1053,6 +1131,7 @@ static ssize_t nsid_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL);
static struct attribute *nvme_ns_attrs[] = {
+ &dev_attr_wwid.attr,
&dev_attr_uuid.attr,
&dev_attr_eui.attr,
&dev_attr_nsid.attr,
@@ -1081,7 +1160,7 @@ static const struct attribute_group nvme_ns_attr_group = {
.is_visible = nvme_attrs_are_visible,
};
-#define nvme_show_function(field) \
+#define nvme_show_str_function(field) \
static ssize_t field##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
@@ -1090,15 +1169,26 @@ static ssize_t field##_show(struct device *dev, \
} \
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
-nvme_show_function(model);
-nvme_show_function(serial);
-nvme_show_function(firmware_rev);
+#define nvme_show_int_function(field) \
+static ssize_t field##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
+ return sprintf(buf, "%d\n", ctrl->field); \
+} \
+static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
+
+nvme_show_str_function(model);
+nvme_show_str_function(serial);
+nvme_show_str_function(firmware_rev);
+nvme_show_int_function(cntlid);
static struct attribute *nvme_dev_attrs[] = {
&dev_attr_reset_controller.attr,
&dev_attr_model.attr,
&dev_attr_serial.attr,
&dev_attr_firmware_rev.attr,
+ &dev_attr_cntlid.attr,
NULL
};
@@ -1308,6 +1398,7 @@ void nvme_scan_namespaces(struct nvme_ctrl *ctrl)
mutex_unlock(&ctrl->namespaces_mutex);
kfree(id);
}
+EXPORT_SYMBOL_GPL(nvme_scan_namespaces);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
@@ -1316,6 +1407,7 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
nvme_ns_remove(ns);
}
+EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
static DEFINE_IDA(nvme_instance_ida);
@@ -1347,13 +1439,14 @@ static void nvme_release_instance(struct nvme_ctrl *ctrl)
}
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
- {
+{
device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));
spin_lock(&dev_list_lock);
list_del(&ctrl->node);
spin_unlock(&dev_list_lock);
}
+EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
static void nvme_free_ctrl(struct kref *kref)
{
@@ -1370,6 +1463,7 @@ void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
kref_put(&ctrl->kref, nvme_free_ctrl);
}
+EXPORT_SYMBOL_GPL(nvme_put_ctrl);
/*
* Initialize an NVMe controller's structures. This needs to be called during
@@ -1394,14 +1488,13 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
ctrl->device = device_create_with_groups(nvme_class, ctrl->dev,
MKDEV(nvme_char_major, ctrl->instance),
- dev, nvme_dev_attr_groups,
+ ctrl, nvme_dev_attr_groups,
"nvme%d", ctrl->instance);
if (IS_ERR(ctrl->device)) {
ret = PTR_ERR(ctrl->device);
goto out_release_instance;
}
get_device(ctrl->device);
- dev_set_drvdata(ctrl->device, ctrl);
ida_init(&ctrl->ns_ida);
spin_lock(&dev_list_lock);
@@ -1414,6 +1507,7 @@ out_release_instance:
out:
return ret;
}
+EXPORT_SYMBOL_GPL(nvme_init_ctrl);
/**
* nvme_kill_queues(): Ends all namespace queues
@@ -1446,6 +1540,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
}
mutex_unlock(&ctrl->namespaces_mutex);
}
+EXPORT_SYMBOL_GPL(nvme_kill_queues);
void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
@@ -1462,6 +1557,7 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
}
mutex_unlock(&ctrl->namespaces_mutex);
}
+EXPORT_SYMBOL_GPL(nvme_stop_queues);
void nvme_start_queues(struct nvme_ctrl *ctrl)
{
@@ -1475,6 +1571,7 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
}
mutex_unlock(&ctrl->namespaces_mutex);
}
+EXPORT_SYMBOL_GPL(nvme_start_queues);
int __init nvme_core_init(void)
{
@@ -1514,3 +1611,8 @@ void nvme_core_exit(void)
class_destroy(nvme_class);
__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
}
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+module_init(nvme_core_init);
+module_exit(nvme_core_exit);
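[Editor's note: the core.c hunks above rework __nvme_submit_sync_cmd() so callers
receive the whole completion queue entry through a caller-owned struct
nvme_completion (stashed in req->special and filled in by __nvme_process_cq() in
the pci.c hunks below), instead of the old trick of casting the 32-bit result
through the req->special pointer. A minimal sketch of the new calling convention,
modeled on the nvme_get_features() hunk; demo_get_feature() is an illustrative
name, not part of the patch.]

/* Sketch of the reworked sync-command convention, assuming the new
 * __nvme_submit_sync_cmd() prototype from this diff. The caller owns
 * the CQE on its stack; the completion path memcpy()s into it. */
static int demo_get_feature(struct nvme_ctrl *ctrl, unsigned fid, u32 *result)
{
	struct nvme_command c;
	struct nvme_completion cqe;	/* filled via req->special */
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.fid = cpu_to_le32(fid);

	ret = __nvme_submit_sync_cmd(ctrl->admin_q, &c, &cqe, NULL, 0, 0);
	if (ret >= 0)		/* 0 = success, >0 = NVMe status code */
		*result = le32_to_cpu(cqe.result);
	return ret;
}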
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 6bb15e4926dc..42a01a931989 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -379,8 +379,31 @@ out:
return ret;
}
+static void nvme_nvm_bb_tbl_fold(struct nvm_dev *nvmdev,
+ int nr_dst_blks, u8 *dst_blks,
+ int nr_src_blks, u8 *src_blks)
+{
+ int blk, offset, pl, blktype;
+
+ for (blk = 0; blk < nr_dst_blks; blk++) {
+ offset = blk * nvmdev->plane_mode;
+ blktype = src_blks[offset];
+
+ /* Bad blocks on any planes take precedence over other types */
+ for (pl = 0; pl < nvmdev->plane_mode; pl++) {
+ if (src_blks[offset + pl] &
+ (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
+ blktype = src_blks[offset + pl];
+ break;
+ }
+ }
+
+ dst_blks[blk] = blktype;
+ }
+}
+
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
- int nr_blocks, nvm_bb_update_fn *update_bbtbl,
+ int nr_dst_blks, nvm_bb_update_fn *update_bbtbl,
void *priv)
{
struct request_queue *q = nvmdev->q;
@@ -388,7 +411,9 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_nvm_command c = {};
struct nvme_nvm_bb_tbl *bb_tbl;
- int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks;
+ u8 *dst_blks = NULL;
+ int nr_src_blks = nr_dst_blks * nvmdev->plane_mode;
+ int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_src_blks;
int ret = 0;
c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
@@ -399,6 +424,12 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
if (!bb_tbl)
return -ENOMEM;
+ dst_blks = kzalloc(nr_dst_blks, GFP_KERNEL);
+ if (!dst_blks) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
bb_tbl, tblsz);
if (ret) {
@@ -420,16 +451,21 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
goto out;
}
- if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) {
+ if (le32_to_cpu(bb_tbl->tblks) != nr_src_blks) {
ret = -EINVAL;
dev_err(ctrl->dev, "bbt unsuspected blocks returned (%u!=%u)",
- le32_to_cpu(bb_tbl->tblks), nr_blocks);
+ le32_to_cpu(bb_tbl->tblks), nr_src_blks);
goto out;
}
+ nvme_nvm_bb_tbl_fold(nvmdev, nr_dst_blks, dst_blks,
+ nr_src_blks, bb_tbl->blk);
+
ppa = dev_to_generic_addr(nvmdev, ppa);
- ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv);
+ ret = update_bbtbl(ppa, nr_dst_blks, dst_blks, priv);
+
out:
+ kfree(dst_blks);
kfree(bb_tbl);
return ret;
}
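[Editor's note: nvme_nvm_bb_tbl_fold() above collapses the per-plane bad-block
table the device returns (nr_dst_blks * plane_mode entries) into one entry per
block, letting a bad or grown-bad mark on any plane win. Below is a
self-contained userspace rendering of the same fold, with illustrative flag
values standing in for the NVM_BLK_T_* constants.]

#include <stdio.h>

#define BLK_T_FREE	0x0	/* illustrative stand-ins for NVM_BLK_T_* */
#define BLK_T_BAD	0x1
#define BLK_T_GRWN_BAD	0x2

int main(void)
{
	/* plane_mode = 2: two source entries per destination block. */
	unsigned char src[] = { 0x0, 0x0,   /* blk 0: both planes good  */
				0x0, 0x1,   /* blk 1: plane 1 bad       */
				0x2, 0x0 }; /* blk 2: plane 0 grown bad */
	unsigned char dst[3];
	int plane_mode = 2, blk, pl;

	for (blk = 0; blk < 3; blk++) {
		int off = blk * plane_mode;

		dst[blk] = src[off];
		/* A bad mark on any plane takes precedence. */
		for (pl = 0; pl < plane_mode; pl++)
			if (src[off + pl] & (BLK_T_BAD | BLK_T_GRWN_BAD)) {
				dst[blk] = src[off + pl];
				break;
			}
		printf("blk %d -> type 0x%x\n", blk, dst[blk]);
	}
	return 0;	/* prints 0x0, 0x1, 0x2 */
}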
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index fb15ba5f5d19..f846da4eb338 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -59,6 +59,12 @@ enum nvme_quirks {
* correctly.
*/
NVME_QUIRK_IDENTIFY_CNS = (1 << 1),
+
+ /*
+ * The controller deterministically returns 0's on reads to discarded
+ * logical blocks.
+ */
+ NVME_QUIRK_DISCARD_ZEROES = (1 << 2),
};
struct nvme_ctrl {
@@ -78,6 +84,7 @@ struct nvme_ctrl {
char serial[20];
char model[40];
char firmware_rev[8];
+ int cntlid;
u32 ctrl_config;
@@ -85,6 +92,7 @@ struct nvme_ctrl {
u32 max_hw_sectors;
u32 stripe_size;
u16 oncs;
+ u16 vid;
atomic_t abort_limit;
u8 event_limit;
u8 vwc;
@@ -124,6 +132,7 @@ struct nvme_ns {
};
struct nvme_ctrl_ops {
+ struct module *module;
int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
@@ -255,7 +264,8 @@ void nvme_requeue_req(struct request *req);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
- void *buffer, unsigned bufflen, u32 *result, unsigned timeout);
+ struct nvme_completion *cqe, void *buffer, unsigned bufflen,
+ unsigned timeout);
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
void __user *ubuffer, unsigned bufflen, u32 *result,
unsigned timeout);
@@ -273,8 +283,6 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
dma_addr_t dma_addr, u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
-extern spinlock_t dev_list_lock;
-
struct sg_io_hdr;
int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
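[Editor's note: the new module member in struct nvme_ctrl_ops above is what lets
nvme-core.ko pin the transport module for every open namespace; see the
try_module_get()/module_put() pairing added in nvme_get_ns_from_disk() and
nvme_release() in core.c. A hedged sketch of how a transport fills it in after
this split; the demo_* names are hypothetical and most callbacks are omitted.]

#include <linux/module.h>
#include "nvme.h"	/* struct nvme_ctrl_ops, as changed above */

/* Hypothetical register accessor for a demo transport. */
static int demo_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
	*val = 0;	/* a real transport reads its register window */
	return 0;
}

static const struct nvme_ctrl_ops demo_ctrl_ops = {
	.module		= THIS_MODULE,	/* pinned while namespaces are open */
	.reg_read32	= demo_reg_read32,
	/* .reg_write32, .reg_read64, .reset_ctrl, ... omitted */
};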
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 680f5780750c..f8db70ae172d 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -27,7 +27,6 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
-#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
@@ -39,6 +38,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/t10-pi.h>
+#include <linux/timer.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <asm/unaligned.h>
@@ -57,18 +57,6 @@
#define NVME_NR_AEN_COMMANDS 1
#define NVME_AQ_BLKMQ_DEPTH (NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)
-unsigned char admin_timeout = 60;
-module_param(admin_timeout, byte, 0644);
-MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
-
-unsigned char nvme_io_timeout = 30;
-module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
-MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
-
-unsigned char shutdown_timeout = 5;
-module_param(shutdown_timeout, byte, 0644);
-MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
-
static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);
@@ -76,10 +64,7 @@ static bool use_cmb_sqes = true;
module_param(use_cmb_sqes, bool, 0644);
MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");
-static LIST_HEAD(dev_list);
-static struct task_struct *nvme_thread;
static struct workqueue_struct *nvme_workq;
-static wait_queue_head_t nvme_kthread_wait;
struct nvme_dev;
struct nvme_queue;
@@ -92,7 +77,6 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
* Represents an NVM Express device. Each nvme_dev is a PCI function.
*/
struct nvme_dev {
- struct list_head node;
struct nvme_queue **queues;
struct blk_mq_tag_set tagset;
struct blk_mq_tag_set admin_tagset;
@@ -110,6 +94,8 @@ struct nvme_dev {
struct work_struct reset_work;
struct work_struct scan_work;
struct work_struct remove_work;
+ struct work_struct async_work;
+ struct timer_list watchdog_timer;
struct mutex shutdown_lock;
bool subsystem;
void __iomem *cmb;
@@ -148,7 +134,6 @@ struct nvme_queue {
u32 __iomem *q_db;
u16 q_depth;
s16 cq_vector;
- u16 sq_head;
u16 sq_tail;
u16 cq_head;
u16 qid;
@@ -303,17 +288,20 @@ static void nvme_complete_async_event(struct nvme_dev *dev,
u16 status = le16_to_cpu(cqe->status) >> 1;
u32 result = le32_to_cpu(cqe->result);
- if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ)
+ if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
++dev->ctrl.event_limit;
+ queue_work(nvme_workq, &dev->async_work);
+ }
+
if (status != NVME_SC_SUCCESS)
return;
switch (result & 0xff07) {
case NVME_AER_NOTICE_NS_CHANGED:
- dev_info(dev->dev, "rescanning\n");
+ dev_info(dev->ctrl.device, "rescanning\n");
nvme_queue_scan(dev);
default:
- dev_warn(dev->dev, "async event result %08x\n", result);
+ dev_warn(dev->ctrl.device, "async event result %08x\n", result);
}
}
@@ -727,7 +715,7 @@ static void nvme_complete_rq(struct request *req)
}
if (unlikely(iod->aborted)) {
- dev_warn(dev->dev,
+ dev_warn(dev->ctrl.device,
"completing aborted command with status: %04x\n",
req->errors);
}
@@ -749,7 +737,6 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
if ((status & 1) != phase)
break;
- nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
if (++head == nvmeq->q_depth) {
head = 0;
phase = !phase;
@@ -759,7 +746,7 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
*tag = -1;
if (unlikely(cqe.command_id >= nvmeq->q_depth)) {
- dev_warn(nvmeq->q_dmadev,
+ dev_warn(nvmeq->dev->ctrl.device,
"invalid id %d completed on queue %d\n",
cqe.command_id, le16_to_cpu(cqe.sq_id));
continue;
@@ -778,10 +765,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
}
req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
- if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
- u32 result = le32_to_cpu(cqe.result);
- req->special = (void *)(uintptr_t)result;
- }
+ if (req->cmd_type == REQ_TYPE_DRV_PRIV && req->special)
+ memcpy(req->special, &cqe, sizeof(cqe));
blk_mq_complete_request(req, status >> 1);
}
@@ -846,15 +831,22 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
return 0;
}
-static void nvme_submit_async_event(struct nvme_dev *dev)
+static void nvme_async_event_work(struct work_struct *work)
{
+ struct nvme_dev *dev = container_of(work, struct nvme_dev, async_work);
+ struct nvme_queue *nvmeq = dev->queues[0];
struct nvme_command c;
memset(&c, 0, sizeof(c));
c.common.opcode = nvme_admin_async_event;
- c.common.command_id = NVME_AQ_BLKMQ_DEPTH + --dev->ctrl.event_limit;
- __nvme_submit_cmd(dev->queues[0], &c);
+ spin_lock_irq(&nvmeq->q_lock);
+ while (dev->ctrl.event_limit > 0) {
+ c.common.command_id = NVME_AQ_BLKMQ_DEPTH +
+ --dev->ctrl.event_limit;
+ __nvme_submit_cmd(nvmeq, &c);
+ }
+ spin_unlock_irq(&nvmeq->q_lock);
}
static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
@@ -924,12 +916,10 @@ static void abort_endio(struct request *req, int error)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct nvme_queue *nvmeq = iod->nvmeq;
- u32 result = (u32)(uintptr_t)req->special;
u16 status = req->errors;
- dev_warn(nvmeq->q_dmadev, "Abort status:%x result:%x", status, result);
+ dev_warn(nvmeq->dev->ctrl.device, "Abort status: 0x%x", status);
atomic_inc(&nvmeq->dev->ctrl.abort_limit);
-
blk_mq_free_request(req);
}
@@ -948,7 +938,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
* shutdown, so we return BLK_EH_HANDLED.
*/
if (test_bit(NVME_CTRL_RESETTING, &dev->flags)) {
- dev_warn(dev->dev,
+ dev_warn(dev->ctrl.device,
"I/O %d QID %d timeout, disable controller\n",
req->tag, nvmeq->qid);
nvme_dev_disable(dev, false);
@@ -962,7 +952,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
* returned to the driver, or if this is the admin queue.
*/
if (!nvmeq->qid || iod->aborted) {
- dev_warn(dev->dev,
+ dev_warn(dev->ctrl.device,
"I/O %d QID %d timeout, reset controller\n",
req->tag, nvmeq->qid);
nvme_dev_disable(dev, false);
@@ -988,8 +978,9 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
cmd.abort.cid = req->tag;
cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
- dev_warn(nvmeq->q_dmadev, "I/O %d QID %d timeout, aborting\n",
- req->tag, nvmeq->qid);
+ dev_warn(nvmeq->dev->ctrl.device,
+ "I/O %d QID %d timeout, aborting\n",
+ req->tag, nvmeq->qid);
abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
BLK_MQ_REQ_NOWAIT);
@@ -1018,7 +1009,7 @@ static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved
if (!blk_mq_request_started(req))
return;
- dev_dbg_ratelimited(nvmeq->q_dmadev,
+ dev_dbg_ratelimited(nvmeq->dev->ctrl.device,
"Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid);
status = NVME_SC_ABORT_REQ;
@@ -1173,9 +1164,6 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
nvmeq->qid = qid;
nvmeq->cq_vector = -1;
dev->queues[qid] = nvmeq;
-
- /* make sure queue descriptor is set before queue count, for kthread */
- mb();
dev->queue_count++;
return nvmeq;
@@ -1360,53 +1348,31 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
return result;
}
-static int nvme_kthread(void *data)
-{
- struct nvme_dev *dev, *next;
-
- while (!kthread_should_stop()) {
- set_current_state(TASK_INTERRUPTIBLE);
- spin_lock(&dev_list_lock);
- list_for_each_entry_safe(dev, next, &dev_list, node) {
- int i;
- u32 csts = readl(dev->bar + NVME_REG_CSTS);
-
- /*
- * Skip controllers currently under reset.
- */
- if (work_pending(&dev->reset_work) || work_busy(&dev->reset_work))
- continue;
-
- if ((dev->subsystem && (csts & NVME_CSTS_NSSRO)) ||
- csts & NVME_CSTS_CFS) {
- if (queue_work(nvme_workq, &dev->reset_work)) {
- dev_warn(dev->dev,
- "Failed status: %x, reset controller\n",
- readl(dev->bar + NVME_REG_CSTS));
- }
- continue;
- }
- for (i = 0; i < dev->queue_count; i++) {
- struct nvme_queue *nvmeq = dev->queues[i];
- if (!nvmeq)
- continue;
- spin_lock_irq(&nvmeq->q_lock);
- nvme_process_cq(nvmeq);
-
- while (i == 0 && dev->ctrl.event_limit > 0)
- nvme_submit_async_event(dev);
- spin_unlock_irq(&nvmeq->q_lock);
- }
+static void nvme_watchdog_timer(unsigned long data)
+{
+ struct nvme_dev *dev = (struct nvme_dev *)data;
+ u32 csts = readl(dev->bar + NVME_REG_CSTS);
+
+ /*
+ * Skip controllers currently under reset.
+ */
+ if (!work_pending(&dev->reset_work) && !work_busy(&dev->reset_work) &&
+ ((csts & NVME_CSTS_CFS) ||
+ (dev->subsystem && (csts & NVME_CSTS_NSSRO)))) {
+ if (queue_work(nvme_workq, &dev->reset_work)) {
+ dev_warn(dev->dev,
+ "Failed status: 0x%x, reset controller.\n",
+ csts);
}
- spin_unlock(&dev_list_lock);
- schedule_timeout(round_jiffies_relative(HZ));
+ return;
}
- return 0;
+
+ mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));
}
static int nvme_create_io_queues(struct nvme_dev *dev)
{
- unsigned i;
+ unsigned i, max;
int ret = 0;
for (i = dev->queue_count; i <= dev->max_qid; i++) {
@@ -1416,7 +1382,8 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
}
}
- for (i = dev->online_queues; i <= dev->queue_count - 1; i++) {
+ max = min(dev->max_qid, dev->queue_count - 1);
+ for (i = dev->online_queues; i <= max; i++) {
ret = nvme_create_queue(dev->queues[i], i);
if (ret) {
nvme_free_queues(dev, i);
@@ -1507,7 +1474,8 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
* access to the admin queue, as that might be only way to fix them up.
*/
if (result > 0) {
- dev_err(dev->dev, "Could not set queue count (%d)\n", result);
+ dev_err(dev->ctrl.device,
+ "Could not set queue count (%d)\n", result);
nr_io_queues = 0;
result = 0;
}
@@ -1573,9 +1541,6 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
adminq->cq_vector = -1;
goto free_queues;
}
-
- /* Free previously allocated queues that are no longer usable */
- nvme_free_queues(dev, nr_io_queues + 1);
return nvme_create_io_queues(dev);
free_queues:
@@ -1709,7 +1674,13 @@ static int nvme_dev_add(struct nvme_dev *dev)
if (blk_mq_alloc_tag_set(&dev->tagset))
return 0;
dev->ctrl.tagset = &dev->tagset;
+ } else {
+ blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
+
+ /* Free previously allocated queues that are no longer usable */
+ nvme_free_queues(dev, dev->online_queues);
}
+
nvme_queue_scan(dev);
return 0;
}
@@ -1796,56 +1767,12 @@ static void nvme_pci_disable(struct nvme_dev *dev)
}
}
-static int nvme_dev_list_add(struct nvme_dev *dev)
-{
- bool start_thread = false;
-
- spin_lock(&dev_list_lock);
- if (list_empty(&dev_list) && IS_ERR_OR_NULL(nvme_thread)) {
- start_thread = true;
- nvme_thread = NULL;
- }
- list_add(&dev->node, &dev_list);
- spin_unlock(&dev_list_lock);
-
- if (start_thread) {
- nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
- wake_up_all(&nvme_kthread_wait);
- } else
- wait_event_killable(nvme_kthread_wait, nvme_thread);
-
- if (IS_ERR_OR_NULL(nvme_thread))
- return nvme_thread ? PTR_ERR(nvme_thread) : -EINTR;
-
- return 0;
-}
-
-/*
-* Remove the node from the device list and check
-* for whether or not we need to stop the nvme_thread.
-*/
-static void nvme_dev_list_remove(struct nvme_dev *dev)
-{
- struct task_struct *tmp = NULL;
-
- spin_lock(&dev_list_lock);
- list_del_init(&dev->node);
- if (list_empty(&dev_list) && !IS_ERR_OR_NULL(nvme_thread)) {
- tmp = nvme_thread;
- nvme_thread = NULL;
- }
- spin_unlock(&dev_list_lock);
-
- if (tmp)
- kthread_stop(tmp);
-}
-
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
{
int i;
u32 csts = -1;
- nvme_dev_list_remove(dev);
+ del_timer_sync(&dev->watchdog_timer);
mutex_lock(&dev->shutdown_lock);
if (pci_is_enabled(to_pci_dev(dev->dev))) {
@@ -1907,7 +1834,7 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
{
- dev_warn(dev->dev, "Removing after probe failure status: %d\n", status);
+ dev_warn(dev->ctrl.device, "Removing after probe failure status: %d\n", status);
kref_get(&dev->ctrl.kref);
nvme_dev_disable(dev, false);
@@ -1954,17 +1881,16 @@ static void nvme_reset_work(struct work_struct *work)
goto out;
dev->ctrl.event_limit = NVME_NR_AEN_COMMANDS;
+ queue_work(nvme_workq, &dev->async_work);
- result = nvme_dev_list_add(dev);
- if (result)
- goto out;
+ mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));
/*
* Keep the controller around but remove all namespaces if we don't have
* any working I/O queue.
*/
if (dev->online_queues < 2) {
- dev_warn(dev->dev, "IO queues not created\n");
+ dev_warn(dev->ctrl.device, "IO queues not created\n");
nvme_remove_namespaces(&dev->ctrl);
} else {
nvme_start_queues(&dev->ctrl);
@@ -2032,6 +1958,7 @@ static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
}
static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
+ .module = THIS_MODULE,
.reg_read32 = nvme_pci_reg_read32,
.reg_write32 = nvme_pci_reg_write32,
.reg_read64 = nvme_pci_reg_read64,
@@ -2089,10 +2016,12 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (result)
goto free;
- INIT_LIST_HEAD(&dev->node);
INIT_WORK(&dev->scan_work, nvme_dev_scan);
INIT_WORK(&dev->reset_work, nvme_reset_work);
INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
+ INIT_WORK(&dev->async_work, nvme_async_event_work);
+ setup_timer(&dev->watchdog_timer, nvme_watchdog_timer,
+ (unsigned long)dev);
mutex_init(&dev->shutdown_lock);
init_completion(&dev->ioq_wait);
@@ -2105,6 +2034,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (result)
goto release_pools;
+ dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
+
queue_work(nvme_workq, &dev->reset_work);
return 0;
@@ -2145,8 +2076,11 @@ static void nvme_remove(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
+ del_timer_sync(&dev->watchdog_timer);
+
set_bit(NVME_CTRL_REMOVING, &dev->flags);
pci_set_drvdata(pdev, NULL);
+ flush_work(&dev->async_work);
flush_work(&dev->scan_work);
nvme_remove_namespaces(&dev->ctrl);
nvme_uninit_ctrl(&dev->ctrl);
@@ -2192,7 +2126,7 @@ static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
* shutdown the controller to quiesce. The controller will be restarted
* after the slot reset through driver's slot_reset callback.
*/
- dev_warn(&pdev->dev, "error detected: state:%d\n", state);
+ dev_warn(dev->ctrl.device, "error detected: state:%d\n", state);
switch (state) {
case pci_channel_io_normal:
return PCI_ERS_RESULT_CAN_RECOVER;
@@ -2209,7 +2143,7 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
- dev_info(&pdev->dev, "restart after slot reset\n");
+ dev_info(dev->ctrl.device, "restart after slot reset\n");
pci_restore_state(pdev);
queue_work(nvme_workq, &dev->reset_work);
return PCI_ERS_RESULT_RECOVERED;
@@ -2232,7 +2166,8 @@ static const struct pci_error_handlers nvme_err_handler = {
static const struct pci_device_id nvme_id_table[] = {
{ PCI_VDEVICE(INTEL, 0x0953),
- .driver_data = NVME_QUIRK_STRIPE_SIZE, },
+ .driver_data = NVME_QUIRK_STRIPE_SIZE |
+ NVME_QUIRK_DISCARD_ZEROES, },
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
@@ -2257,34 +2192,20 @@ static int __init nvme_init(void)
{
int result;
- init_waitqueue_head(&nvme_kthread_wait);
-
nvme_workq = alloc_workqueue("nvme", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
if (!nvme_workq)
return -ENOMEM;
- result = nvme_core_init();
- if (result < 0)
- goto kill_workq;
-
result = pci_register_driver(&nvme_driver);
if (result)
- goto core_exit;
- return 0;
-
- core_exit:
- nvme_core_exit();
- kill_workq:
- destroy_workqueue(nvme_workq);
+ destroy_workqueue(nvme_workq);
return result;
}
static void __exit nvme_exit(void)
{
pci_unregister_driver(&nvme_driver);
- nvme_core_exit();
destroy_workqueue(nvme_workq);
- BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
_nvme_check_size();
}
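[Editor's note: the pci.c conversion above replaces the global nvme_kthread with
a per-device watchdog timer plus workqueue. Below is a minimal standalone sketch
of that pattern, using the pre-4.15 setup_timer() API that this tree uses; the
demo_* names and the stubbed health check are illustrative, not the driver's
code, and reset_work is assumed to be initialized elsewhere with INIT_WORK().]

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct demo_dev {
	struct timer_list watchdog_timer;
	struct work_struct reset_work;
};

/* Timer callback: cheap health check only; slow recovery is deferred
 * to process context via the workqueue, as the driver defers to
 * dev->reset_work. (Pre-4.15 timer API, unsigned long data cookie.) */
static void demo_watchdog(unsigned long data)
{
	struct demo_dev *dev = (struct demo_dev *)data;
	bool fatal = false;	/* a real driver reads a status register */

	if (fatal) {
		schedule_work(&dev->reset_work);
		return;		/* the reset path re-arms the timer */
	}
	mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));
}

static void demo_start(struct demo_dev *dev)
{
	setup_timer(&dev->watchdog_timer, demo_watchdog,
		    (unsigned long)dev);
	mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));
}

static void demo_stop(struct demo_dev *dev)
{
	del_timer_sync(&dev->watchdog_timer); /* as nvme_dev_disable() does */
}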