| author | Linus Torvalds <torvalds@linux-foundation.org> | 2021-06-30 12:21:16 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2021-06-30 12:21:16 -0700 |
| commit | 440462198d9c45e48f2d8d9b18c5702d92282f46 (patch) | |
| tree | 9aab5db02f35d0cf9034108116b6a483147791ad /drivers/nvme/host | |
| parent | df668a5fe461bb9d7e899c538acc7197746038f4 (diff) | |
| parent | 5ed9b357024dc43f75099f597187df05bcd5173c (diff) | |
Merge tag 'for-5.14/drivers-2021-06-29' of git://git.kernel.dk/linux-block
Pull block driver updates from Jens Axboe:
"Pretty calm round, mostly just NVMe and a bit of MD:
- NVMe updates (via Christoph)
- improve the APST configuration algorithm (Alexey Bogoslavsky)
- look for StorageD3Enable on companion ACPI device
(Mario Limonciello)
- allow selecting the network interface for TCP connections
(Martin Belanger)
- misc cleanups (Amit Engel, Chaitanya Kulkarni, Colin Ian King,
Christoph)
- move the ACPI StorageD3 code to drivers/acpi/ and add quirks
for certain AMD CPUs (Mario Limonciello)
- zoned device support for nvmet (Chaitanya Kulkarni)
- fix the rules for changing the serial number in nvmet
(Noam Gottlieb)
- various small fixes and cleanups (Dan Carpenter, JK Kim,
Chaitanya Kulkarni, Hannes Reinecke, Wesley Sheng, Geert
Uytterhoeven, Daniel Wagner)
- MD updates (via Song)
- iostats rewrite (Guoqing Jiang)
- raid5 lock contention optimization (Gal Ofri)
- Fall-through warning fix (Gustavo)
- Misc fixes (Gustavo, Jiapeng)"
* tag 'for-5.14/drivers-2021-06-29' of git://git.kernel.dk/linux-block: (78 commits)
nvmet: use NVMET_MAX_NAMESPACES to set nn value
loop: Fix missing discard support when using LOOP_CONFIGURE
nvme.h: add missing nvme_lba_range_type endianness annotations
nvme: remove zeroout memset call for struct
nvme-pci: remove zeroout memset call for struct
nvmet: remove zeroout memset call for struct
nvmet: add ZBD over ZNS backend support
nvmet: add Command Set Identifier support
nvmet: add nvmet_req_bio put helper for backends
nvmet: add req cns error complete helper
block: export blk_next_bio()
nvmet: remove local variable
nvmet: use nvme status value directly
nvmet: use u32 type for the local variable nsid
nvmet: use u32 for nvmet_subsys max_nsid
nvmet: use req->cmd directly in file-ns fast path
nvmet: use req->cmd directly in bdev-ns fast path
nvmet: make ver stable once connection established
nvmet: allow mn change if subsys not discovered
nvmet: make sn stable once connection was established
...
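One of the host-facing additions in this pull is the new `host_iface` fabrics option, which lets nvme-tcp bind each queue's socket to a specific network interface. The tcp.c hunk below does this in-kernel with `sock_setsockopt(..., SOL_SOCKET, SO_BINDTODEVICE, ...)`. Here is a minimal userspace sketch of the same SO_BINDTODEVICE mechanism; the interface name "eth0" and the error handling are illustrative assumptions, not part of the patch, and binding normally requires CAP_NET_RAW.

```c
/* Userspace sketch of the SO_BINDTODEVICE binding that the new nvme-tcp
 * host_iface option performs in-kernel via sock_setsockopt().
 * "eth0" is a placeholder interface name. */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	const char *iface = "eth0";	/* placeholder, assumption */
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* Restrict traffic on this socket to one interface, as the
	 * host_iface option does for each NVMe/TCP queue socket. */
	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, iface,
		       strlen(iface)) < 0) {
		perror("setsockopt(SO_BINDTODEVICE)");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
```

On the fabrics side, the option is parsed from the `host_iface=%s` token added to `opt_tokens`, so the interface name supplied at connect time ends up in `nctrl->opts->host_iface` before the sockets are created.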
Diffstat (limited to 'drivers/nvme/host')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/nvme/host/Kconfig | 2 |
| -rw-r--r-- | drivers/nvme/host/core.c | 192 |
| -rw-r--r-- | drivers/nvme/host/fabrics.c | 58 |
| -rw-r--r-- | drivers/nvme/host/fabrics.h | 6 |
| -rw-r--r-- | drivers/nvme/host/fc.c | 2 |
| -rw-r--r-- | drivers/nvme/host/ioctl.c | 61 |
| -rw-r--r-- | drivers/nvme/host/multipath.c | 33 |
| -rw-r--r-- | drivers/nvme/host/nvme.h | 17 |
| -rw-r--r-- | drivers/nvme/host/pci.c | 82 |
| -rw-r--r-- | drivers/nvme/host/rdma.c | 2 |
| -rw-r--r-- | drivers/nvme/host/tcp.c | 31 |
| -rw-r--r-- | drivers/nvme/host/zns.c | 27 |
12 files changed, 278 insertions, 235 deletions
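The APST rework in the core.c changes below keeps the original "spend at most 2% of the time transitioning" heuristic as the fallback when the new `apst_*_timeout_ms`/`apst_*_latency_tol_us` module parameters are not in effect. The standalone sketch that follows restates that fallback path; the helper name `apst_entry()` and the example latency value are assumptions for illustration, while the bit packing mirrors the patch's `target = cpu_to_le64((state << 3) | (transition_ms << 8))`.

```c
/* Sketch of the default APST heuristic used by nvme_configure_apst() when
 * the new module parameters are left unset: wait roughly 50 * (enlat + exlat)
 * before transitioning, i.e. at most ~2% of the time spent transitioning. */
#include <stdint.h>
#include <stdio.h>

static uint64_t apst_entry(unsigned int state, uint64_t total_latency_us)
{
	/* Transition time in ms = total latency (us) / 20, rounded up,
	 * capped at the 24-bit field maximum. */
	uint64_t transition_ms = (total_latency_us + 19) / 20;

	if (transition_ms > (1 << 24) - 1)
		transition_ms = (1 << 24) - 1;
	/* Bits 7:3 hold the idle transition power state, bits 31:8 the
	 * idle time prior to transition in milliseconds. */
	return ((uint64_t)state << 3) | (transition_ms << 8);
}

int main(void)
{
	/* Example: power state 4 with 5000 us combined entry + exit latency. */
	printf("APST entry: 0x%llx\n",
	       (unsigned long long)apst_entry(4, 5000));
	return 0;
}
```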
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index 494675aeaaad..c3f3d77f1aac 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig @@ -21,7 +21,7 @@ config NVME_MULTIPATH help This option enables support for multipath access to NVMe subsystems. If this option is enabled only a single - /dev/nvmeXnY device will show up for each NVMe namespaces, + /dev/nvmeXnY device will show up for each NVMe namespace, even if it is accessible through multiple controllers. config NVME_HWMON diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 01889a8abb6b..80c656dcbbac 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -57,6 +57,26 @@ static bool force_apst; module_param(force_apst, bool, 0644); MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off"); +static unsigned long apst_primary_timeout_ms = 100; +module_param(apst_primary_timeout_ms, ulong, 0644); +MODULE_PARM_DESC(apst_primary_timeout_ms, + "primary APST timeout in ms"); + +static unsigned long apst_secondary_timeout_ms = 2000; +module_param(apst_secondary_timeout_ms, ulong, 0644); +MODULE_PARM_DESC(apst_secondary_timeout_ms, + "secondary APST timeout in ms"); + +static unsigned long apst_primary_latency_tol_us = 15000; +module_param(apst_primary_latency_tol_us, ulong, 0644); +MODULE_PARM_DESC(apst_primary_latency_tol_us, + "primary APST latency tolerance in us"); + +static unsigned long apst_secondary_latency_tol_us = 100000; +module_param(apst_secondary_latency_tol_us, ulong, 0644); +MODULE_PARM_DESC(apst_secondary_latency_tol_us, + "secondary APST latency tolerance in us"); + static bool streams; module_param(streams, bool, 0644); MODULE_PARM_DESC(streams, "turn on support for Streams write directives"); @@ -701,9 +721,7 @@ EXPORT_SYMBOL_GPL(__nvme_check_ready); static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable) { - struct nvme_command c; - - memset(&c, 0, sizeof(c)); + struct nvme_command c = { }; c.directive.opcode = nvme_admin_directive_send; c.directive.nsid = cpu_to_le32(NVME_NSID_ALL); @@ -728,9 +746,8 @@ static int nvme_enable_streams(struct nvme_ctrl *ctrl) static int nvme_get_stream_params(struct nvme_ctrl *ctrl, struct streams_directive_params *s, u32 nsid) { - struct nvme_command c; + struct nvme_command c = { }; - memset(&c, 0, sizeof(c)); memset(s, 0, sizeof(*s)); c.directive.opcode = nvme_admin_directive_recv; @@ -1440,10 +1457,9 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid, unsigned int dword11, void *buffer, size_t buflen, u32 *result) { union nvme_result res = { 0 }; - struct nvme_command c; + struct nvme_command c = { }; int ret; - memset(&c, 0, sizeof(c)); c.features.opcode = op; c.features.fid = cpu_to_le32(fid); c.features.dword11 = cpu_to_le32(dword11); @@ -1522,36 +1538,6 @@ static void nvme_enable_aen(struct nvme_ctrl *ctrl) queue_work(nvme_wq, &ctrl->async_event_work); } -/* - * Issue ioctl requests on the first available path. Note that unlike normal - * block layer requests we will not retry failed request on another controller. 
- */ -struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk, - struct nvme_ns_head **head, int *srcu_idx) -{ -#ifdef CONFIG_NVME_MULTIPATH - if (disk->fops == &nvme_ns_head_ops) { - struct nvme_ns *ns; - - *head = disk->private_data; - *srcu_idx = srcu_read_lock(&(*head)->srcu); - ns = nvme_find_path(*head); - if (!ns) - srcu_read_unlock(&(*head)->srcu, *srcu_idx); - return ns; - } -#endif - *head = NULL; - *srcu_idx = -1; - return disk->private_data; -} - -void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx) -{ - if (head) - srcu_read_unlock(&head->srcu, idx); -} - static int nvme_ns_open(struct nvme_ns *ns) { @@ -1601,9 +1587,8 @@ int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo) static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type, u32 max_integrity_segments) { - struct blk_integrity integrity; + struct blk_integrity integrity = { }; - memset(&integrity, 0, sizeof(integrity)); switch (pi_type) { case NVME_NS_DPS_PI_TYPE3: integrity.profile = &t10_pi_type3_crc; @@ -1948,30 +1933,45 @@ static char nvme_pr_type(enum pr_type type) } }; +static int nvme_send_ns_head_pr_command(struct block_device *bdev, + struct nvme_command *c, u8 data[16]) +{ + struct nvme_ns_head *head = bdev->bd_disk->private_data; + int srcu_idx = srcu_read_lock(&head->srcu); + struct nvme_ns *ns = nvme_find_path(head); + int ret = -EWOULDBLOCK; + + if (ns) { + c->common.nsid = cpu_to_le32(ns->head->ns_id); + ret = nvme_submit_sync_cmd(ns->queue, c, data, 16); + } + srcu_read_unlock(&head->srcu, srcu_idx); + return ret; +} + +static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c, + u8 data[16]) +{ + c->common.nsid = cpu_to_le32(ns->head->ns_id); + return nvme_submit_sync_cmd(ns->queue, c, data, 16); +} + static int nvme_pr_command(struct block_device *bdev, u32 cdw10, u64 key, u64 sa_key, u8 op) { - struct nvme_ns_head *head = NULL; - struct nvme_ns *ns; - struct nvme_command c; - int srcu_idx, ret; + struct nvme_command c = { }; u8 data[16] = { 0, }; - ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx); - if (unlikely(!ns)) - return -EWOULDBLOCK; - put_unaligned_le64(key, &data[0]); put_unaligned_le64(sa_key, &data[8]); - memset(&c, 0, sizeof(c)); c.common.opcode = op; - c.common.nsid = cpu_to_le32(ns->head->ns_id); c.common.cdw10 = cpu_to_le32(cdw10); - ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16); - nvme_put_ns_from_disk(head, srcu_idx); - return ret; + if (IS_ENABLED(CONFIG_NVME_MULTIPATH) && + bdev->bd_disk->fops == &nvme_ns_head_ops) + return nvme_send_ns_head_pr_command(bdev, &c, data); + return nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c, data); } static int nvme_pr_register(struct block_device *bdev, u64 old, @@ -2036,9 +2036,8 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, bool send) { struct nvme_ctrl *ctrl = data; - struct nvme_command cmd; + struct nvme_command cmd = { }; - memset(&cmd, 0, sizeof(cmd)); if (send) cmd.common.opcode = nvme_admin_security_send; else @@ -2053,6 +2052,17 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, EXPORT_SYMBOL_GPL(nvme_sec_submit); #endif /* CONFIG_BLK_SED_OPAL */ +#ifdef CONFIG_BLK_DEV_ZONED +static int nvme_report_zones(struct gendisk *disk, sector_t sector, + unsigned int nr_zones, report_zones_cb cb, void *data) +{ + return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb, + data); +} +#else +#define nvme_report_zones NULL +#endif /* CONFIG_BLK_DEV_ZONED */ + static const struct 
block_device_operations nvme_bdev_ops = { .owner = THIS_MODULE, .ioctl = nvme_ioctl, @@ -2218,13 +2228,53 @@ static int nvme_configure_acre(struct nvme_ctrl *ctrl) } /* + * The function checks whether the given total (exlat + enlat) latency of + * a power state allows the latter to be used as an APST transition target. + * It does so by comparing the latency to the primary and secondary latency + * tolerances defined by module params. If there's a match, the corresponding + * timeout value is returned and the matching tolerance index (1 or 2) is + * reported. + */ +static bool nvme_apst_get_transition_time(u64 total_latency, + u64 *transition_time, unsigned *last_index) +{ + if (total_latency <= apst_primary_latency_tol_us) { + if (*last_index == 1) + return false; + *last_index = 1; + *transition_time = apst_primary_timeout_ms; + return true; + } + if (apst_secondary_timeout_ms && + total_latency <= apst_secondary_latency_tol_us) { + if (*last_index <= 2) + return false; + *last_index = 2; + *transition_time = apst_secondary_timeout_ms; + return true; + } + return false; +} + +/* * APST (Autonomous Power State Transition) lets us program a table of power * state transitions that the controller will perform automatically. - * We configure it with a simple heuristic: we are willing to spend at most 2% - * of the time transitioning between power states. Therefore, when running in - * any given state, we will enter the next lower-power non-operational state - * after waiting 50 * (enlat + exlat) microseconds, as long as that state's exit - * latency is under the requested maximum latency. + * + * Depending on module params, one of the two supported techniques will be used: + * + * - If the parameters provide explicit timeouts and tolerances, they will be + * used to build a table with up to 2 non-operational states to transition to. + * The default parameter values were selected based on the values used by + * Microsoft's and Intel's NVMe drivers. Yet, since we don't implement dynamic + * regeneration of the APST table in the event of switching between external + * and battery power, the timeouts and tolerances reflect a compromise + * between values used by Microsoft for AC and battery scenarios. + * - If not, we'll configure the table with a simple heuristic: we are willing + * to spend at most 2% of the time transitioning between power states. + * Therefore, when running in any given state, we will enter the next + * lower-power non-operational state after waiting 50 * (enlat + exlat) + * microseconds, as long as that state's exit latency is under the requested + * maximum latency. * * We will not autonomously enter any non-operational state for which the total * latency exceeds ps_max_latency_us. @@ -2240,6 +2290,7 @@ static int nvme_configure_apst(struct nvme_ctrl *ctrl) int max_ps = -1; int state; int ret; + unsigned last_lt_index = UINT_MAX; /* * If APST isn't supported or if we haven't been initialized yet, @@ -2298,13 +2349,19 @@ static int nvme_configure_apst(struct nvme_ctrl *ctrl) le32_to_cpu(ctrl->psd[state].entry_lat); /* - * This state is good. Use it as the APST idle target for - * higher power states. + * This state is good. It can be used as the APST idle target + * for higher power states. 
*/ - transition_ms = total_latency_us + 19; - do_div(transition_ms, 20); - if (transition_ms > (1 << 24) - 1) - transition_ms = (1 << 24) - 1; + if (apst_primary_timeout_ms && apst_primary_latency_tol_us) { + if (!nvme_apst_get_transition_time(total_latency_us, + &transition_ms, &last_lt_index)) + continue; + } else { + transition_ms = total_latency_us + 19; + do_div(transition_ms, 20); + if (transition_ms > (1 << 24) - 1) + transition_ms = (1 << 24) - 1; + } target = cpu_to_le64((state << 3) | (transition_ms << 8)); if (max_ps == -1) @@ -4068,6 +4125,11 @@ static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env) ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s", opts->host_traddr ?: "none"); + if (ret) + return ret; + + ret = add_uevent_var(env, "NVME_HOST_IFACE=%s", + opts->host_iface ?: "none"); } return ret; } diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 34a84d2086c7..1e6a7cc056ca 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -112,6 +112,9 @@ int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size) if (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR) len += scnprintf(buf + len, size - len, "%shost_traddr=%s", (len) ? "," : "", ctrl->opts->host_traddr); + if (ctrl->opts->mask & NVMF_OPT_HOST_IFACE) + len += scnprintf(buf + len, size - len, "%shost_iface=%s", + (len) ? "," : "", ctrl->opts->host_iface); len += scnprintf(buf + len, size - len, "\n"); return len; @@ -187,11 +190,10 @@ EXPORT_SYMBOL_GPL(nvmf_reg_read32); */ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) { - struct nvme_command cmd; + struct nvme_command cmd = { }; union nvme_result res; int ret; - memset(&cmd, 0, sizeof(cmd)); cmd.prop_get.opcode = nvme_fabrics_command; cmd.prop_get.fctype = nvme_fabrics_type_property_get; cmd.prop_get.attrib = 1; @@ -233,10 +235,9 @@ EXPORT_SYMBOL_GPL(nvmf_reg_read64); */ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) { - struct nvme_command cmd; + struct nvme_command cmd = { }; int ret; - memset(&cmd, 0, sizeof(cmd)); cmd.prop_set.opcode = nvme_fabrics_command; cmd.prop_set.fctype = nvme_fabrics_type_property_set; cmd.prop_set.attrib = 0; @@ -254,28 +255,23 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) EXPORT_SYMBOL_GPL(nvmf_reg_write32); /** - * nvmf_log_connect_error() - Error-parsing-diagnostic print - * out function for connect() errors. - * - * @ctrl: the specific /dev/nvmeX device that had the error. - * - * @errval: Error code to be decoded in a more human-friendly - * printout. - * - * @offset: For use with the NVMe error code NVME_SC_CONNECT_INVALID_PARAM. - * - * @cmd: This is the SQE portion of a submission capsule. - * - * @data: This is the "Data" portion of a submission capsule. + * nvmf_log_connect_error() - Error-parsing-diagnostic print out function for + * connect() errors. + * @ctrl: The specific /dev/nvmeX device that had the error. + * @errval: Error code to be decoded in a more human-friendly + * printout. + * @offset: For use with the NVMe error code + * NVME_SC_CONNECT_INVALID_PARAM. + * @cmd: This is the SQE portion of a submission capsule. + * @data: This is the "Data" portion of a submission capsule. 
*/ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl, int errval, int offset, struct nvme_command *cmd, struct nvmf_connect_data *data) { - int err_sctype = errval & (~NVME_SC_DNR); + int err_sctype = errval & ~NVME_SC_DNR; switch (err_sctype) { - case (NVME_SC_CONNECT_INVALID_PARAM): if (offset >> 16) { char *inv_data = "Connect Invalid Data Parameter"; @@ -318,35 +314,30 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl, } } break; - case NVME_SC_CONNECT_INVALID_HOST: dev_err(ctrl->device, "Connect for subsystem %s is not allowed, hostnqn: %s\n", data->subsysnqn, data->hostnqn); break; - case NVME_SC_CONNECT_CTRL_BUSY: dev_err(ctrl->device, "Connect command failed: controller is busy or not available\n"); break; - case NVME_SC_CONNECT_FORMAT: dev_err(ctrl->device, "Connect incompatible format: %d", cmd->connect.recfmt); break; - case NVME_SC_HOST_PATH_ERROR: dev_err(ctrl->device, "Connect command failed: host path error\n"); break; - default: dev_err(ctrl->device, "Connect command failed, error wo/DNR bit: %d\n", err_sctype); break; - } /* switch (err_sctype) */ + } } /** @@ -371,12 +362,11 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl, */ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl) { - struct nvme_command cmd; + struct nvme_command cmd = { }; union nvme_result res; struct nvmf_connect_data *data; int ret; - memset(&cmd, 0, sizeof(cmd)); cmd.connect.opcode = nvme_fabrics_command; cmd.connect.fctype = nvme_fabrics_type_connect; cmd.connect.qid = 0; @@ -439,12 +429,11 @@ EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue); */ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll) { - struct nvme_command cmd; + struct nvme_command cmd = { }; struct nvmf_connect_data *data; union nvme_result res; int ret; - memset(&cmd, 0, sizeof(cmd)); cmd.connect.opcode = nvme_fabrics_command; cmd.connect.fctype = nvme_fabrics_type_connect; cmd.connect.qid = cpu_to_le16(qid); @@ -550,6 +539,7 @@ static const match_table_t opt_tokens = { { NVMF_OPT_KATO, "keep_alive_tmo=%d" }, { NVMF_OPT_HOSTNQN, "hostnqn=%s" }, { NVMF_OPT_HOST_TRADDR, "host_traddr=%s" }, + { NVMF_OPT_HOST_IFACE, "host_iface=%s" }, { NVMF_OPT_HOST_ID, "hostid=%s" }, { NVMF_OPT_DUP_CONNECT, "duplicate_connect" }, { NVMF_OPT_DISABLE_SQFLOW, "disable_sqflow" }, @@ -759,6 +749,15 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, kfree(opts->host_traddr); opts->host_traddr = p; break; + case NVMF_OPT_HOST_IFACE: + p = match_strdup(args); + if (!p) { + ret = -ENOMEM; + goto out; + } + kfree(opts->host_iface); + opts->host_iface = p; + break; case NVMF_OPT_HOST_ID: p = match_strdup(args); if (!p) { @@ -943,6 +942,7 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts) kfree(opts->trsvcid); kfree(opts->subsysnqn); kfree(opts->host_traddr); + kfree(opts->host_iface); kfree(opts); } EXPORT_SYMBOL_GPL(nvmf_free_options); diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h index d7f7974dc208..c31dad69a773 100644 --- a/drivers/nvme/host/fabrics.h +++ b/drivers/nvme/host/fabrics.h @@ -66,6 +66,7 @@ enum { NVMF_OPT_NR_POLL_QUEUES = 1 << 18, NVMF_OPT_TOS = 1 << 19, NVMF_OPT_FAIL_FAST_TMO = 1 << 20, + NVMF_OPT_HOST_IFACE = 1 << 21, }; /** @@ -83,7 +84,9 @@ enum { * @trsvcid: The transport-specific TRSVCID field for a port on the * subsystem which is adding a controller. * @host_traddr: A transport-specific field identifying the NVME host port - * to use for the connection to the controller. + * to use for the connection to the controller. 
+ * @host_iface: A transport-specific field identifying the NVME host + * interface to use for the connection to the controller. * @queue_size: Number of IO queue elements. * @nr_io_queues: Number of controller IO queues that will be established. * @reconnect_delay: Time between two consecutive reconnect attempts. @@ -108,6 +111,7 @@ struct nvmf_ctrl_options { char *traddr; char *trsvcid; char *host_traddr; + char *host_iface; size_t queue_size; unsigned int nr_io_queues; unsigned int reconnect_delay; diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index f183f9fa03d0..7600863f7752 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -3112,7 +3112,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) } /* FC-NVME supports normal SGL Data Block Descriptors */ - if (!(ctrl->ctrl.sgls & ((1 << 0) | (1 << 1)))) { + if (!nvme_ctrl_sgl_supported(&ctrl->ctrl)) { dev_err(ctrl->ctrl.device, "Mandatory sgls are not supported!\n"); ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR; diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index 9557ead02de1..d93928d1e5bd 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -177,6 +177,20 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) metadata, meta_len, lower_32_bits(io.slba), NULL, 0); } +static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl, + struct nvme_ns *ns, __u32 nsid) +{ + if (ns && nsid != ns->head->ns_id) { + dev_err(ctrl->device, + "%s: nsid (%u) in cmd does not match nsid (%u)" + "of namespace\n", + current->comm, nsid, ns->head->ns_id); + return false; + } + + return true; +} + static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, struct nvme_passthru_cmd __user *ucmd) { @@ -192,12 +206,8 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, return -EFAULT; if (cmd.flags) return -EINVAL; - if (ns && cmd.nsid != ns->head->ns_id) { - dev_err(ctrl->device, - "%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n", - current->comm, cmd.nsid, ns->head->ns_id); + if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid)) return -EINVAL; - } memset(&c, 0, sizeof(c)); c.common.opcode = cmd.opcode; @@ -242,12 +252,8 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns, return -EFAULT; if (cmd.flags) return -EINVAL; - if (ns && cmd.nsid != ns->head->ns_id) { - dev_err(ctrl->device, - "%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n", - current->comm, cmd.nsid, ns->head->ns_id); + if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid)) return -EINVAL; - } memset(&c, 0, sizeof(c)); c.common.opcode = cmd.opcode; @@ -372,12 +378,13 @@ long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg) #ifdef CONFIG_NVME_MULTIPATH static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *argp, struct nvme_ns_head *head, int srcu_idx) + __releases(&head->srcu) { struct nvme_ctrl *ctrl = ns->ctrl; int ret; nvme_get_ctrl(ns->ctrl); - nvme_put_ns_from_disk(head, srcu_idx); + srcu_read_unlock(&head->srcu, srcu_idx); ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp); nvme_put_ctrl(ctrl); @@ -387,14 +394,15 @@ static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd, int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { - struct nvme_ns_head *head = NULL; + struct nvme_ns_head *head = bdev->bd_disk->private_data; void __user *argp = (void __user *)arg; struct nvme_ns *ns; - int srcu_idx, ret; + int 
srcu_idx, ret = -EWOULDBLOCK; - ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx); - if (unlikely(!ns)) - return -EWOULDBLOCK; + srcu_idx = srcu_read_lock(&head->srcu); + ns = nvme_find_path(head); + if (!ns) + goto out_unlock; /* * Handle ioctls that apply to the controller instead of the namespace @@ -402,12 +410,11 @@ int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode, * deadlock when deleting namespaces using the passthrough interface. */ if (is_ctrl_ioctl(cmd)) - ret = nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx); - else { - ret = nvme_ns_ioctl(ns, cmd, argp); - nvme_put_ns_from_disk(head, srcu_idx); - } + return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx); + ret = nvme_ns_ioctl(ns, cmd, argp); +out_unlock: + srcu_read_unlock(&head->srcu, srcu_idx); return ret; } @@ -419,21 +426,19 @@ long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd, container_of(cdev, struct nvme_ns_head, cdev); void __user *argp = (void __user *)arg; struct nvme_ns *ns; - int srcu_idx, ret; + int srcu_idx, ret = -EWOULDBLOCK; srcu_idx = srcu_read_lock(&head->srcu); ns = nvme_find_path(head); - if (!ns) { - srcu_read_unlock(&head->srcu, srcu_idx); - return -EWOULDBLOCK; - } + if (!ns) + goto out_unlock; if (is_ctrl_ioctl(cmd)) return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx); ret = nvme_ns_ioctl(ns, cmd, argp); - nvme_put_ns_from_disk(head, srcu_idx); - +out_unlock: + srcu_read_unlock(&head->srcu, srcu_idx); return ret; } #endif /* CONFIG_NVME_MULTIPATH */ diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index b5fbdb416022..0ea5298469c3 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -349,6 +349,25 @@ static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode) nvme_put_ns_head(disk->private_data); } +#ifdef CONFIG_BLK_DEV_ZONED +static int nvme_ns_head_report_zones(struct gendisk *disk, sector_t sector, + unsigned int nr_zones, report_zones_cb cb, void *data) +{ + struct nvme_ns_head *head = disk->private_data; + struct nvme_ns *ns; + int srcu_idx, ret = -EWOULDBLOCK; + + srcu_idx = srcu_read_lock(&head->srcu); + ns = nvme_find_path(head); + if (ns) + ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data); + srcu_read_unlock(&head->srcu, srcu_idx); + return ret; +} +#else +#define nvme_ns_head_report_zones NULL +#endif /* CONFIG_BLK_DEV_ZONED */ + const struct block_device_operations nvme_ns_head_ops = { .owner = THIS_MODULE, .submit_bio = nvme_ns_head_submit_bio, @@ -356,7 +375,7 @@ const struct block_device_operations nvme_ns_head_ops = { .release = nvme_ns_head_release, .ioctl = nvme_ns_head_ioctl, .getgeo = nvme_getgeo, - .report_zones = nvme_report_zones, + .report_zones = nvme_ns_head_report_zones, .pr_ops = &nvme_pr_ops, }; @@ -416,11 +435,6 @@ static void nvme_requeue_work(struct work_struct *work) next = bio->bi_next; bio->bi_next = NULL; - /* - * Reset disk to the mpath node and resubmit to select a new - * path. 
- */ - bio_set_dev(bio, head->disk->part0); submit_bio_noacct(bio); } } @@ -779,6 +793,13 @@ int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) !(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)) return 0; + if (!ctrl->max_namespaces || + ctrl->max_namespaces > le32_to_cpu(id->nn)) { + dev_err(ctrl->device, + "Invalid MNAN value %u\n", ctrl->max_namespaces); + return -EINVAL; + } + ctrl->anacap = id->anacap; ctrl->anatt = id->anatt; ctrl->nanagrpid = le32_to_cpu(id->nanagrpid); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 0015860ec12b..75420ceacc10 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -674,9 +674,6 @@ int nvme_delete_ctrl(struct nvme_ctrl *ctrl); void nvme_queue_scan(struct nvme_ctrl *ctrl); int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, void *log, size_t size, u64 offset); -struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk, - struct nvme_ns_head **head, int *srcu_idx); -void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx); bool nvme_tryget_ns_head(struct nvme_ns_head *head); void nvme_put_ns_head(struct nvme_ns_head *head); int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device, @@ -697,6 +694,7 @@ extern const struct attribute_group *nvme_ns_id_attr_groups[]; extern const struct pr_ops nvme_pr_ops; extern const struct block_device_operations nvme_ns_head_ops; +struct nvme_ns *nvme_find_path(struct nvme_ns_head *head); #ifdef CONFIG_NVME_MULTIPATH static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl) { @@ -718,7 +716,6 @@ void nvme_mpath_uninit(struct nvme_ctrl *ctrl); void nvme_mpath_stop(struct nvme_ctrl *ctrl); bool nvme_mpath_clear_current_path(struct nvme_ns *ns); void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl); -struct nvme_ns *nvme_find_path(struct nvme_ns_head *head); static inline void nvme_mpath_check_last_path(struct nvme_ns *ns) { @@ -810,17 +807,14 @@ static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys) #endif /* CONFIG_NVME_MULTIPATH */ int nvme_revalidate_zones(struct nvme_ns *ns); +int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector, + unsigned int nr_zones, report_zones_cb cb, void *data); #ifdef CONFIG_BLK_DEV_ZONED int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf); -int nvme_report_zones(struct gendisk *disk, sector_t sector, - unsigned int nr_zones, report_zones_cb cb, void *data); - blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req, struct nvme_command *cmnd, enum nvme_zone_mgmt_action action); #else -#define nvme_report_zones NULL - static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req, struct nvme_command *cmnd, enum nvme_zone_mgmt_action action) @@ -875,6 +869,11 @@ static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl) } #endif +static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl) +{ + return ctrl->sgls & ((1 << 0) | (1 << 1)); +} + u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode); void nvme_execute_passthru_rq(struct request *rq); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index a29b170701fc..d3c5086673bc 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -307,13 +307,12 @@ static void nvme_dbbuf_free(struct nvme_queue *nvmeq) static void nvme_dbbuf_set(struct nvme_dev *dev) { - struct nvme_command c; + struct nvme_command c = { }; unsigned int i; if (!dev->dbbuf_dbs) return; - memset(&c, 0, sizeof(c)); 
c.dbbuf.opcode = nvme_admin_dbbuf; c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr); c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr); @@ -536,7 +535,7 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req) avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg); - if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1)))) + if (!nvme_ctrl_sgl_supported(&dev->ctrl)) return false; if (!iod->nvmeq->qid) return false; @@ -559,7 +558,6 @@ static void nvme_free_prps(struct nvme_dev *dev, struct request *req) dma_pool_free(dev->prp_page_pool, prp_list, dma_addr); dma_addr = next_dma_addr; } - } static void nvme_free_sgls(struct nvme_dev *dev, struct request *req) @@ -576,7 +574,6 @@ static void nvme_free_sgls(struct nvme_dev *dev, struct request *req) dma_pool_free(dev->prp_page_pool, sg_list, dma_addr); dma_addr = next_dma_addr; } - } static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req) @@ -855,7 +852,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, &cmnd->rw, &bv); if (iod->nvmeq->qid && sgl_threshold && - dev->ctrl.sgls & ((1 << 0) | (1 << 1))) + nvme_ctrl_sgl_supported(&dev->ctrl)) return nvme_setup_sgl_simple(dev, req, &cmnd->rw, &bv); } @@ -1032,7 +1029,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) static inline void nvme_update_cq_head(struct nvme_queue *nvmeq) { - u16 tmp = nvmeq->cq_head + 1; + u32 tmp = nvmeq->cq_head + 1; if (tmp == nvmeq->q_depth) { nvmeq->cq_head = 0; @@ -1114,9 +1111,8 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl) { struct nvme_dev *dev = to_nvme_dev(ctrl); struct nvme_queue *nvmeq = &dev->queues[0]; - struct nvme_command c; + struct nvme_command c = { }; - memset(&c, 0, sizeof(c)); c.common.opcode = nvme_admin_async_event; c.common.command_id = NVME_AQ_BLK_MQ_DEPTH; nvme_submit_cmd(nvmeq, &c, true); @@ -1124,9 +1120,8 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl) static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) { - struct nvme_command c; + struct nvme_command c = { }; - memset(&c, 0, sizeof(c)); c.delete_queue.opcode = opcode; c.delete_queue.qid = cpu_to_le16(id); @@ -1136,7 +1131,7 @@ static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, struct nvme_queue *nvmeq, s16 vector) { - struct nvme_command c; + struct nvme_command c = { }; int flags = NVME_QUEUE_PHYS_CONTIG; if (!test_bit(NVMEQ_POLLED, &nvmeq->flags)) @@ -1146,7 +1141,6 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, * Note: we (ab)use the fact that the prp fields survive if no data * is attached to the request. */ - memset(&c, 0, sizeof(c)); c.create_cq.opcode = nvme_admin_create_cq; c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); c.create_cq.cqid = cpu_to_le16(qid); @@ -1161,7 +1155,7 @@ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, struct nvme_queue *nvmeq) { struct nvme_ctrl *ctrl = &dev->ctrl; - struct nvme_command c; + struct nvme_command c = { }; int flags = NVME_QUEUE_PHYS_CONTIG; /* @@ -1176,7 +1170,6 @@ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, * Note: we (ab)use the fact that the prp fields survive if no data * is attached to the request. 
*/ - memset(&c, 0, sizeof(c)); c.create_sq.opcode = nvme_admin_create_sq; c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr); c.create_sq.sqid = cpu_to_le16(qid); @@ -1257,7 +1250,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) struct nvme_queue *nvmeq = iod->nvmeq; struct nvme_dev *dev = nvmeq->dev; struct request *abort_req; - struct nvme_command cmd; + struct nvme_command cmd = { }; u32 csts = readl(dev->bar + NVME_REG_CSTS); /* If PCI error recovery process is happening, we cannot reset or @@ -1337,7 +1330,6 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) } iod->aborted = 1; - memset(&cmd, 0, sizeof(cmd)); cmd.abort.opcode = nvme_admin_abort_cmd; cmd.abort.cid = req->tag; cmd.abort.sqid = cpu_to_le16(nvmeq->qid); @@ -1888,10 +1880,9 @@ static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits) { u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT; u64 dma_addr = dev->host_mem_descs_dma; - struct nvme_command c; + struct nvme_command c = { }; int ret; - memset(&c, 0, sizeof(c)); c.features.opcode = nvme_admin_set_features; c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF); c.features.dword11 = cpu_to_le32(bits); @@ -2265,9 +2256,8 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) { struct request_queue *q = nvmeq->dev->ctrl.admin_q; struct request *req; - struct nvme_command cmd; + struct nvme_command cmd = { }; - memset(&cmd, 0, sizeof(cmd)); cmd.delete_queue.opcode = opcode; cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid); @@ -2828,54 +2818,6 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) return 0; } -#ifdef CONFIG_ACPI -static bool nvme_acpi_storage_d3(struct pci_dev *dev) -{ - struct acpi_device *adev; - struct pci_dev *root; - acpi_handle handle; - acpi_status status; - u8 val; - - /* - * Look for _DSD property specifying that the storage device on the port - * must use D3 to support deep platform power savings during - * suspend-to-idle. - */ - root = pcie_find_root_port(dev); - if (!root) - return false; - - adev = ACPI_COMPANION(&root->dev); - if (!adev) - return false; - - /* - * The property is defined in the PXSX device for South complex ports - * and in the PEGP device for North complex ports. - */ - status = acpi_get_handle(adev->handle, "PXSX", &handle); - if (ACPI_FAILURE(status)) { - status = acpi_get_handle(adev->handle, "PEGP", &handle); - if (ACPI_FAILURE(status)) - return false; - } - - if (acpi_bus_get_device(handle, &adev)) - return false; - - if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable", - &val)) - return false; - return val == 1; -} -#else -static inline bool nvme_acpi_storage_d3(struct pci_dev *dev) -{ - return false; -} -#endif /* CONFIG_ACPI */ - static void nvme_async_probe(void *data, async_cookie_t cookie) { struct nvme_dev *dev = data; @@ -2925,7 +2867,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) quirks |= check_vendor_combination_bug(pdev); - if (!noacpi && nvme_acpi_storage_d3(pdev)) { + if (!noacpi && acpi_storage_d3(&pdev->dev)) { /* * Some systems use a bios work around to ask for D3 on * platforms that support kernel managed suspend. 
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 4697a94c0945..a9e70cefd7ed 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -1088,7 +1088,7 @@ static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl) static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new) { - int ret = -EINVAL; + int ret; bool changed; ret = nvme_rdma_configure_admin_queue(ctrl, new); diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 34f4b3402f7c..c7bd37103cf4 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -123,6 +123,7 @@ struct nvme_tcp_ctrl { struct blk_mq_tag_set admin_tag_set; struct sockaddr_storage addr; struct sockaddr_storage src_addr; + struct net_device *ndev; struct nvme_ctrl ctrl; struct work_struct err_work; @@ -1455,6 +1456,20 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, } } + if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) { + char *iface = nctrl->opts->host_iface; + sockptr_t optval = KERNEL_SOCKPTR(iface); + + ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE, + optval, strlen(iface)); + if (ret) { + dev_err(nctrl->device, + "failed to bind to interface %s queue %d err %d\n", + iface, qid, ret); + goto err_sock; + } + } + queue->hdr_digest = nctrl->opts->hdr_digest; queue->data_digest = nctrl->opts->data_digest; if (queue->hdr_digest || queue->data_digest) { @@ -1973,11 +1988,13 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new) return ret; if (ctrl->icdoff) { + ret = -EOPNOTSUPP; dev_err(ctrl->device, "icdoff is not supported!\n"); goto destroy_admin; } - if (!(ctrl->sgls & ((1 << 0) | (1 << 1)))) { + if (!nvme_ctrl_sgl_supported(ctrl)) { + ret = -EOPNOTSUPP; dev_err(ctrl->device, "Mandatory sgls are not supported!\n"); goto destroy_admin; } @@ -2515,6 +2532,16 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, } } + if (opts->mask & NVMF_OPT_HOST_IFACE) { + ctrl->ndev = dev_get_by_name(&init_net, opts->host_iface); + if (!ctrl->ndev) { + pr_err("invalid interface passed: %s\n", + opts->host_iface); + ret = -ENODEV; + goto out_free_ctrl; + } + } + if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) { ret = -EALREADY; goto out_free_ctrl; @@ -2571,7 +2598,7 @@ static struct nvmf_transport_ops nvme_tcp_transport = { NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO | NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST | NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES | - NVMF_OPT_TOS, + NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE, .create_ctrl = nvme_tcp_create_ctrl, }; diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c index 475dd45c3db4..d95010481fce 100644 --- a/drivers/nvme/host/zns.c +++ b/drivers/nvme/host/zns.c @@ -171,8 +171,8 @@ static int nvme_zone_parse_entry(struct nvme_ns *ns, return cb(&zone, idx, data); } -static int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector, - unsigned int nr_zones, report_zones_cb cb, void *data) +int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector, + unsigned int nr_zones, report_zones_cb cb, void *data) { struct nvme_zone_report *report; struct nvme_command c = { }; @@ -180,6 +180,9 @@ static int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector, unsigned int nz, i; size_t buflen; + if (ns->head->ids.csi != NVME_CSI_ZNS) + return -EINVAL; + report = nvme_zns_alloc_report_buffer(ns, nr_zones, &buflen); if (!report) return -ENOMEM; @@ -227,26 +230,6 @@ out_free: return ret; } -int nvme_report_zones(struct gendisk *disk, sector_t sector, - unsigned int nr_zones, 
report_zones_cb cb, void *data) -{ - struct nvme_ns_head *head = NULL; - struct nvme_ns *ns; - int srcu_idx, ret; - - ns = nvme_get_ns_from_disk(disk, &head, &srcu_idx); - if (unlikely(!ns)) - return -EWOULDBLOCK; - - if (ns->head->ids.csi == NVME_CSI_ZNS) - ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data); - else - ret = -EINVAL; - nvme_put_ns_from_disk(head, srcu_idx); - - return ret; -} - blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req, struct nvme_command *c, enum nvme_zone_mgmt_action action) { |